大神是如何走过来的,六个项目代码了解一下(上)
1、抓取知乎图片,只用30行代码:# 视频资料分享 QQ群 519970686
import re
import time
import urllib.request


def extract_image_urls(html):
    """Return every URL appearing in an ``img src="..."`` attribute of *html*.

    The original pattern contained unescaped double quotes (a syntax
    error); this uses a raw string and stops at the closing quote.
    """
    return re.findall(r'img src="(.+?)"', html)


def main():
    """Scroll through a Zhihu question page and download every image found."""
    # Imported lazily so the module (and extract_image_urls) is usable
    # without Selenium installed.
    from selenium import webdriver

    driver = webdriver.Chrome()
    try:
        driver.maximize_window()
        driver.get("https://www.zhihu.com/question/29134042")
        # Click "load more" at most 10 times. The original counter was
        # never incremented, so its bound had no effect.
        for page in range(10):
            driver.execute_script(
                "window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)
            try:
                # NOTE(review): find_element_by_css_selector was removed in
                # Selenium 4; use find_element(By.CSS_SELECTOR, ...) there.
                driver.find_element_by_css_selector(
                    'button.QuestionMainAction').click()
                print("page" + str(page))
                time.sleep(1)
            except Exception:
                # No more "load more" button — stop scrolling.
                break
        # Save each image under a timestamp-based name, as the original did.
        for url in extract_image_urls(str(driver.page_source)):
            stamp = time.time()
            local = r"%s.jpg" % stamp
            urllib.request.urlretrieve(url, local)
            print("编号:" + str(stamp))
    finally:
        driver.quit()  # always release the browser (the original leaked it)


if __name__ == "__main__":
    main()
2、没事闲的时候,听两个聊天机器人互相聊天:# 视频资料分享 QQ群 519970686
from time import sleep

import requests


def main():
    """Relay messages between two public chatbot APIs forever.

    Each bot's reply becomes the next message for the other bot; runs
    until interrupted (Ctrl-C) or a request fails.
    """
    topic = input("请主人输入话题:")
    while True:
        # Tuling bot ("小鱼"). NOTE(review): the API key is hard-coded, as
        # in the original — move it to configuration for real use.
        reply = requests.post(
            "http://www.tuling123.com/openapi/api",
            data={"key": "4fede3c4384846b9a7d0456a5e1e2943", "info": topic},
            timeout=10,  # don't hang forever on a dead endpoint
        ).json()
        sleep(1)
        print('小鱼:', reply['text'])
        topic = reply['text']

        # Qingyunke bot ("菲菲").
        resp = requests.get(
            "http://api.qingyunke.com/api.php",
            {'key': 'free', 'appid': 0, 'msg': topic},
            timeout=10,
        )
        resp.encoding = 'utf8'
        reply = resp.json()
        sleep(1)
        print('菲菲:', reply['content'])
        # Bug fix: the original never fed 菲菲's answer back, so 小鱼 kept
        # replying to its own previous message instead of conversing.
        topic = reply['content']


if __name__ == "__main__":
    main()
网上还有一个据说智商比较高的小i机器人,下面用爬虫的方式实现与它对话:
import re
import urllib.parse  # explicit: the original relied on urllib.request importing it
import urllib.request


def extract_reply(html_doc):
    """Return the last chat reply embedded in the xiaoi response, or None.

    Replies appear as ``"content":"...\\r\\n"`` fragments in the JSONP body;
    the last one is the bot's answer to the most recent message.
    """
    replies = re.findall(r'"content":"(.+?)\r\n"', html_doc)
    return replies[-1] if replies else None


def main():
    """Interactive chat loop against the public xiaoi web robot."""
    while True:
        question = input("主人:")
        quoted = urllib.parse.quote(question)
        # The JSON payload is pre-URL-encoded in the query string; only the
        # user's message is substituted in.
        url = (
            "http://nlp.xiaoi.com/robot/webrobot?&callback=__webrobot_processMsg&data=%7B%22sessionId%22%3A%22ff725c236e5245a3ac825b2dd88a7501%22%2C%22robotId%22%3A%22webbot%22%2C%22userId%22%3A%227cd29df3450745fbbdcf1a462e6c58e6%22%2C%22body%22%3A%7B%22content%22%3A%22"
            + quoted
            + "%22%7D%2C%22type%22%3A%22txt%22%7D"
        )
        # Close the response (the original leaked it) and bound the wait.
        with urllib.request.urlopen(url, timeout=10) as link:
            html_doc = link.read().decode()
        reply = extract_reply(html_doc)
        # Guard against an empty result instead of crashing on [-1].
        print("小i:" + (reply if reply is not None else "(无回复)"))


if __name__ == "__main__":
    main()
3、AI分析唐诗的作者是李白还是杜甫:# 视频资料分享 QQ群 519970686
import jieba
from nltk.classify import NaiveBayesClassifier


def word_feats(words):
    """Map each item of *words* to True — the feature-dict shape NLTK's
    classifier expects. Given a string, this yields per-character features."""
    return dict([(word, True) for word in words])


def _load_words(path):
    """Read a poem corpus file and return its jieba-segmented word list."""
    # "rb" as in the original; jieba accepts bytes and decodes itself.
    # The file is closed (the original leaked the handle).
    with open(path, "rb") as f:
        text = f.read()
    return " ".join(jieba.cut(text)).split(" ")


def main():
    """Guess whether an input line of poetry sounds like Li Bai or Du Fu."""
    # Corpora must be prepared beforehand: Li Bai's poems in libai.txt,
    # Du Fu's poems in dufu.txt.
    libai_words = _load_words(r"libai.txt")
    dufu_words = _load_words(r"dufu.txt")

    # Bug fix: the original iterated the joined corpus *string* character
    # by character, so training samples were single characters while
    # classification used whole words. Train on words for both phases.
    libai_features = [(word_feats(w), 'lb') for w in libai_words]
    dufu_features = [(word_feats(w), 'df') for w in dufu_words]
    classifier = NaiveBayesClassifier.train(libai_features + dufu_features)

    sentence = input("请输入一句你喜欢的诗:")
    print("")
    words = " ".join(jieba.cut(sentence)).split(" ")

    # Count how many words the classifier attributes to each poet,
    # classifying each word once (the original classified twice).
    lb = 0
    df = 0
    for word in words:
        label = classifier.classify(word_feats(word))
        if label == 'lb':
            lb += 1
        elif label == 'df':
            df += 1

    # Present the split as percentages of the input words.
    print('李白的可能性:%.2f%%' % (lb / len(words) * 100))
    print('杜甫的可能性:%.2f%%' % (df / len(words) * 100))


if __name__ == "__main__":
    main()
真心想学习Python的朋友,群内有视频资料分享,欢迎一起加入Python的浪潮,逐浪前行