Python PycURL notes
Pycurl
The PycURL package is a Python interface to libcurl, which is written in C. Compared with urllib it is much faster.
libcurl is a client-side URL transfer library supporting FTP, FTPS, HTTP, HTTPS, GOPHER, TELNET, DICT, FILE and LDAP. It also supports HTTPS certificates, HTTP POST, HTTP PUT, FTP uploads, proxies, cookies, basic authentication, resumed FTP transfers, HTTP proxy tunneling, and more.
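The "faster than urllib" claim is easy to spot-check. Below is a minimal sketch (not from the original post) that times one GET with urllib.request and one with pycurl; the URL is just an example and the absolute numbers depend entirely on the network, so treat the output as illustrative only.

import time
import pycurl
from io import BytesIO
from urllib.request import urlopen

url = "http://www.cnn.com"  # example target, same as the snippet below

t0 = time.time()
body = urlopen(url).read()            # plain stdlib fetch
t_urllib = time.time() - t0

t0 = time.time()
buf = BytesIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.WRITEFUNCTION, buf.write)
c.perform()                           # blocking fetch via libcurl
c.close()
t_pycurl = time.time() - t0

print("urllib: %.3fs (%d bytes)  pycurl: %.3fs (%d bytes)"
      % (t_urllib, len(body), t_pycurl, len(buf.getvalue())))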
import pycurl
from io import BytesIO
from urllib.parse import urlencode

url = "http://www.cnn.com"
c = pycurl.Curl()  # create a Curl object, the counterpart of libcurl's CURL handle
b = BytesIO()      # buffer that collects the response body
# c.setopt(c.POST, 1)
c.setopt(pycurl.URL, url)  # the URL to fetch
# write callback: response bytes are appended to the buffer
c.setopt(pycurl.WRITEFUNCTION, b.write)
c.setopt(pycurl.FOLLOWLOCATION, 1)  # follow HTTP redirects (0 = off, 1 = on)
# cap the number of redirects to guard against redirect loops
c.setopt(pycurl.MAXREDIRS, 5)
# timeouts
c.setopt(pycurl.CONNECTTIMEOUT, 60)  # seconds to wait for the connection
# c.setopt(pycurl.TIMEOUT, 300)      # seconds for the whole transfer
# c.setopt(pycurl.HEADER, True)      # include response headers in the output
# c.setopt(c.HTTPHEADER, ["Content-Type: application/x-www-form-urlencoded","X-Requested-With:XMLHttpRequest","Cookie:"+set_cookie[0]])
# spoof a browser User-Agent
c.setopt(pycurl.USERAGENT, "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)")
# c.setopt(pycurl.AUTOREFERER,1)
# c.setopt(c.REFERER, url)
# cookie handling
# Option -b/--cookie <name=string/file> Cookie string or file to read cookies from
# Note: must be a string, not a file object.
# c.setopt(pycurl.COOKIEFILE, "cookie_file_name")
# Option -c/--cookie-jar Write cookies to this file after operation
# Note: must be a string, not a file object.
# c.setopt(pycurl.COOKIEJAR, "cookie_file_name")
# Option -d/--data HTTP POST data
post_data_dic = {"name": "value"}
c.setopt(c.POSTFIELDS, urlencode(post_data_dic))  # also switches the method to POST
# proxy settings
# c.setopt(pycurl.PROXY, 'http://11.11.11.11:8080')
# c.setopt(pycurl.PROXYUSERPWD, 'aaa:aaa')
# tunnel through the proxy with HTTP CONNECT instead of plain forwarding
# c.setopt(pycurl.HTTPPROXYTUNNEL, 1)
# don't use signals for timeout handling (needed in multithreaded programs)
# c.setopt(pycurl.NOSIGNAL, 1)
# multipart POST: a form field name plus the file to upload
# (would replace the POSTFIELDS form body set above)
# post_file = "/home/ubuntu/avatar.jpg"
# c.setopt(c.HTTPPOST, [("textname", (c.FORM_FILE, post_file))])
# debug callback: receives an integer message type plus the message itself;
# the VERBOSE option must be enabled for this callback to be invoked
# c.setopt(c.VERBOSE, 1)
# c.setopt(c.DEBUGFUNCTION, test)
# write body and headers to files instead of the in-memory buffer
# f = open("body", "wb")
# c.setopt(c.WRITEDATA, f)
# h = open("header", "wb")
# c.setopt(c.WRITEHEADER, h)
# print("Header is in file 'header', body is in file 'body'")
# f.close()
# h.close()
# progress callback (NOPROGRESS must be cleared for it to fire)
# c.setopt(c.NOPROGRESS, 0)
# c.setopt(c.PROGRESSFUNCTION, progress)
# c.setopt(c.OPT_FILETIME, 1)
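# The callbacks named above (`test` for DEBUGFUNCTION, `progress` for
# PROGRESSFUNCTION) are never defined in this post; a minimal sketch:
# def test(debug_type, debug_msg):
#     # receives an integer message type and the message itself
#     print("debug(%d): %s" % (debug_type, debug_msg))
# def progress(download_t, download_d, upload_t, upload_d):
#     # total and so-far byte counts for download and upload
#     print("downloaded %d of %d" % (download_d, download_t))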
# perform the transfer; blocks until the request finishes
c.perform()
print "HTTP-code:", c.getinfo(c.HTTP_CODE) #打印出 200(HTTP状态码)
print "Total-time:", c.getinfo(c.TOTAL_TIME)
print "Download speed: %.2f bytes/second" % c.getinfo(c.SPEED_DOWNLOAD)
print "Document size: %d bytes" % c.getinfo(c.SIZE_DOWNLOAD)
print "Effective URL:", c.getinfo(c.EFFECTIVE_URL)
print "Content-type:", c.getinfo(c.CONTENT_TYPE)
print "Namelookup-time:", c.getinfo(c.NAMELOOKUP_TIME)
print "Redirect-time:", c.getinfo(c.REDIRECT_TIME)
print "Redirect-count:", c.getinfo(c.REDIRECT_COUNT)
# epoch = c.getinfo(c.INFO_FILETIME)
# print("Filetime: %d (%s)" % (epoch, time.ctime(epoch)))
html = b.getvalue().decode("utf-8", errors="replace")  # BytesIO holds bytes; decode for text
print(html)
b.close()
c.close()
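For fetching several URLs at once, pycurl also exposes libcurl's multi interface. A rough sketch (not from the original post; the URL list is a placeholder) using pycurl.CurlMulti:

import pycurl
from io import BytesIO

urls = ["http://www.cnn.com", "http://www.example.com"]

m = pycurl.CurlMulti()
handles = []
for u in urls:
    c = pycurl.Curl()
    c.buf = BytesIO()                  # keep each handle's buffer on the handle
    c.setopt(pycurl.URL, u)
    c.setopt(pycurl.WRITEFUNCTION, c.buf.write)
    c.setopt(pycurl.FOLLOWLOCATION, 1)
    m.add_handle(c)
    handles.append(c)

num_active = len(handles)
while num_active:
    # drive all transfers until libcurl asks us to wait for socket activity
    while True:
        ret, num_active = m.perform()
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    m.select(1.0)                      # block until any handle is ready

for c in handles:
    print(c.getinfo(pycurl.EFFECTIVE_URL), len(c.buf.getvalue()), "bytes")
    m.remove_handle(c)
    c.close()
m.close()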
A small crawl demo with requests and grequests, pulling links out of pages with a regex:

import re
import requests
import grequests

proxies = {
    "http": "http://127.0.0.1:1080",  # local proxy; adjust or drop as needed
}

# a general URL pattern (unused below)
pattern = re.compile(r'((https|http|ftp|rtsp|mms)?:\/\/)[^\s]+')
def get_url(html):
    # extract http(s) links whose domain ends in .com, .cn or .org
    m = re.findall(r'https?://[^\s\'"<>]+\.(?:com|cn|org)[^\s\'"<>]*', html)
    # m = re.findall(r'^https?://[\d\-a-zA-Z]+(\.[\d\-a-zA-Z]+)*/?$', html)
    return m
html = requests.get("http://www.best918.com/").text
urls = get_url(html)
gen = (grequests.get(i, proxies=proxies, timeout=3) for i in urls)
for i in grequests.imap(gen, size=200):
    print(i.url)
    for k in get_url(i.text):
        urls.append(k)  # newly found links are appended to the list feeding the generator
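The loop above re-queues every link it finds, duplicates included, into the very list that feeds the request generator. A sketch of the same idea with a seen-set and a bounded number of rounds (reusing get_url and proxies from above; max_rounds is a made-up knob, not part of the original):

def crawl(start_url, max_rounds=2):
    seen = set()
    frontier = [start_url]
    for _ in range(max_rounds):
        seen.update(frontier)
        reqs = (grequests.get(u, proxies=proxies, timeout=3) for u in frontier)
        found = set()
        for resp in grequests.imap(reqs, size=200):
            print(resp.url)
            found.update(get_url(resp.text))
        # only visit links not seen in any earlier round
        frontier = [u for u in found if u not in seen]
    return seen

# crawl("http://www.best918.com/")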
Finally, a concurrency timing test with grequests:

import grequests
import time

start_time = time.time()
proxies = {
    "http": "http://127.0.0.1:1080",
    "https": "https://127.0.0.1:1080",
}
urls = [
    "http://www.baidu.com",
    "http://www.google.com",
]
rs = (grequests.get(u, timeout=3, proxies=proxies) for u in urls)
for r in grequests.imap(rs, size=200):
    print(r.content)
duration = time.time() - start_time
print('grequests takes %s seconds' % duration)
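One caveat worth knowing: grequests monkey-patches the standard library through gevent as soon as it is imported, so the common recommendation is to import grequests before requests and the rest of your networking code.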