colly documentation
c.OnRequest(func(r *colly.Request) {
    fmt.Println("Visiting", r.URL)
})

c.OnError(func(_ *colly.Response, err error) {
    log.Println("Something went wrong:", err)
})

c.OnResponse(func(r *colly.Response) {
    fmt.Println("Visited", r.Request.URL)
})

c.OnHTML("a[href]", func(e *colly.HTMLElement) {
    e.Request.Visit(e.Attr("href"))
})

c.OnHTML("tr td:nth-of-type(1)", func(e *colly.HTMLElement) {
    fmt.Println("First column of a table row:", e.Text)
})

c.OnXML("//h1", func(e *colly.XMLElement) {
    fmt.Println(e.Text)
})

c.OnScraped(func(r *colly.Response) {
    fmt.Println("Finished", r.Request.URL)
})
- OnRequest
Called before a request
    c.OnRequest(func(r *colly.Request) {
        fmt.Println("Visiting", r.URL.String())
    })
- OnError
Called if an error occurred during the request
    c.OnError(func(r *colly.Response, err error) {
        fmt.Println("Request URL:", r.Request.URL, "failed with response:", r, "\nError:", err)
    })
- OnResponse
Called after a response is received
    c.OnResponse(func(r *colly.Response) {
        log.Println("response received", r.StatusCode)
    })
- OnHTML
Called right after OnResponse if the received content is HTML
- OnXML
Called right after OnHTML if the received content is HTML or XML
- OnScraped
Called after the OnXML callbacks
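Tying the fragments above together, here is a minimal runnable sketch of the full callback lifecycle (http://httpbin.org/ is used only as a stand-in target):

package main

import (
    "fmt"
    "log"

    "github.com/gocolly/colly"
)

func main() {
    c := colly.NewCollector()

    c.OnRequest(func(r *colly.Request) {
        fmt.Println("Visiting", r.URL)
    })
    c.OnError(func(_ *colly.Response, err error) {
        log.Println("Something went wrong:", err)
    })
    c.OnResponse(func(r *colly.Response) {
        fmt.Println("Visited", r.Request.URL)
    })
    c.OnHTML("a[href]", func(e *colly.HTMLElement) {
        fmt.Println("Link found:", e.Attr("href"))
    })
    c.OnScraped(func(r *colly.Response) {
        fmt.Println("Finished", r.Request.URL)
    })

    c.Visit("http://httpbin.org/")
}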
1. Collector options
Setting the User-Agent
c := colly.NewCollector(colly.UserAgent("xy"))

c2 := colly.NewCollector()
c2.UserAgent = "xy"
Setting the User-Agent dynamically
c := colly.NewCollector()
c.OnRequest(func(r *colly.Request) {
    r.Headers.Set("User-Agent", RandomString())
})
Allowing URL revisits (disabling URL deduplication)
c := colly.NewCollector(colly.AllowURLRevisit())

c2 := colly.NewCollector()
c2.AllowURLRevisit = true
Collector configuration options (settable via environment variables)
ALLOWED_DOMAINS (comma separated list of domains) // allowed domains
c := colly.NewCollector(
    // Visit only domains: hackerspaces.org, wiki.hackerspaces.org
    colly.AllowedDomains("hackerspaces.org", "wiki.hackerspaces.org"),
)
CACHE_DIR (string) // cache directory
c := colly.NewCollector(
    colly.CacheDir("./cache"),
)
DETECT_CHARSET (y/n)
DISABLE_COOKIES (y/n)
DISALLOWED_DOMAINS (comma separated list of domains)
IGNORE_ROBOTSTXT (y/n)
MAX_BODY_SIZE (int)
MAX_DEPTH (int - 0 means infinite)
c := colly.NewCollector(
    // MaxDepth is 1, so only the links on the scraped page
    // are visited, and no further links are followed
    colly.MaxDepth(1),
)
PARSE_HTTP_ERROR_RESPONSE (y/n)
USER_AGENT (string)
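These options let a collector be tuned from the environment without recompiling; colly parses them during NewCollector(), after the functional options. A minimal sketch — the COLLY_ prefix matches colly v1's environment parser, but treat it as an assumption to verify against your version:

package main

import (
    "fmt"
    "os"

    "github.com/gocolly/colly"
)

func main() {
    // Assumption: colly v1 reads COLLY_-prefixed environment
    // variables when the collector is created.
    os.Setenv("COLLY_MAX_DEPTH", "2")
    os.Setenv("COLLY_USER_AGENT", "my-crawler/1.0")

    c := colly.NewCollector()
    fmt.Println(c.MaxDepth, c.UserAgent) // expected: 2 my-crawler/1.0
}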
HTTP configuration

c := colly.NewCollector()
c.WithTransport(&http.Transport{
    Proxy: http.ProxyFromEnvironment,
    DialContext: (&net.Dialer{
        Timeout:   30 * time.Second,
        KeepAlive: 30 * time.Second,
        DualStack: true,
    }).DialContext,
    MaxIdleConns:          100,
    IdleConnTimeout:       90 * time.Second,
    TLSHandshakeTimeout:   10 * time.Second,
    ExpectContinueTimeout: 1 * time.Second,
})
Enabling the debugger
import (
    "github.com/gocolly/colly"
    "github.com/gocolly/colly/debug"
)

func main() {
    c := colly.NewCollector(colly.Debugger(&debug.LogDebugger{}))
    // [..]
}
Setting a proxy
package main

import (
    "github.com/gocolly/colly"
    "github.com/gocolly/colly/proxy"
)

func main() {
    c := colly.NewCollector()
    if p, err := proxy.RoundRobinProxySwitcher(
        "socks5://127.0.0.1:1337",
        "socks5://127.0.0.1:1338",
        "http://127.0.0.1:8080",
    ); err == nil {
        c.SetProxyFunc(p)
    }
    // ...
}
Custom proxy switcher
var proxies = []*url.URL{
    {Host: "127.0.0.1:8080"},
    {Host: "127.0.0.1:8081"},
}

// pick a random proxy for each request (rand is "math/rand")
func randomProxySwitcher(_ *http.Request) (*url.URL, error) {
    return proxies[rand.Intn(len(proxies))], nil
}

// ...
c.SetProxyFunc(randomProxySwitcher)
Complete proxy demo
package main

import (
    "bytes"
    "log"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/proxy"
)

func main() {
    // Instantiate default collector
    c := colly.NewCollector(colly.AllowURLRevisit())

    // Rotate two socks5 proxies
    rp, err := proxy.RoundRobinProxySwitcher("socks5://127.0.0.1:1337", "socks5://127.0.0.1:1338")
    if err != nil {
        log.Fatal(err)
    }
    c.SetProxyFunc(rp)

    // Print the response
    c.OnResponse(func(r *colly.Response) {
        log.Printf("%s\n", bytes.Replace(r.Body, []byte("\n"), nil, -1))
    })

    // Fetch httpbin.org/ip five times
    for i := 0; i < 5; i++ {
        c.Visit("https://httpbin.org/ip")
    }
}
Duplicating a similarly configured collector with Clone
c := colly.NewCollector(
    colly.UserAgent("myUserAgent"),
    colly.AllowedDomains("foo.com", "bar.com"),
)
// Custom User-Agent and allowed domains are cloned to c2
c2 := c.Clone()
Passing data between collectors with context
c.OnResponse(func(r *colly.Response) {
    // Ctx.Put takes a key and a value
    r.Ctx.Put("Custom-Header", r.Headers.Get("Custom-Header"))
    c2.Request("GET", "https://foo.com/", nil, r.Ctx, nil)
})
Note: setting a proxy and using a cache directory currently conflict with each other.
Disabling keep-alive
c := colly.NewCollector()
c.WithTransport(&http.Transport{
    DisableKeepAlives: true,
})
Asynchronous collection
c := colly.NewCollector(
    colly.Async(true),
)

// or set the field directly:
c.Async = true

// remember to call c.Wait()
Complete async demo
package main

import (
    "fmt"

    "github.com/gocolly/colly"
)

func main() {
    // Instantiate default collector
    c := colly.NewCollector(
        // MaxDepth is 2, so only the links on the scraped page
        // and links on those pages are visited
        colly.MaxDepth(2),
        colly.Async(true),
    )

    // Limit the maximum parallelism to 2
    // This is necessary if the goroutines are dynamically
    // created to control the limit of simultaneous requests.
    //
    // Parallelism can also be controlled by spawning a fixed
    // number of goroutines.
    c.Limit(&colly.LimitRule{DomainGlob: "*", Parallelism: 2})

    // On every a element which has an href attribute call the callback
    c.OnHTML("a[href]", func(e *colly.HTMLElement) {
        link := e.Attr("href")
        // Print link
        fmt.Println(link)
        // Visit link found on page on a new thread
        e.Request.Visit(link)
    })

    // Start scraping on https://en.wikipedia.org
    c.Visit("https://en.wikipedia.org/")
    // Wait until threads are finished
    c.Wait()
}
Built-in colly extensions
package main

import (
    "log"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/extensions"
)

func main() {
    c := colly.NewCollector()
    visited := false

    extensions.RandomUserAgent(c) // random User-Agent
    extensions.Referer(c)         // fill in the Referer header automatically

    c.OnResponse(func(r *colly.Response) {
        log.Println(string(r.Body))
        if !visited {
            visited = true
            r.Request.Visit("/get?q=2")
        }
    })

    c.Visit("http://httpbin.org/get")
}
Currently supported extensions
func RandomUserAgent
func Referer
func URLLengthFilter
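RandomUserAgent and Referer are demonstrated above; URLLengthFilter aborts requests whose URL exceeds a given length. A minimal sketch — the 2083-character limit is an arbitrary choice for illustration:

package main

import (
    "github.com/gocolly/colly"
    "github.com/gocolly/colly/extensions"
)

func main() {
    c := colly.NewCollector()
    // Drop any request whose URL is longer than 2083 characters
    // (an assumed limit, roughly the old browser maximum).
    extensions.URLLengthFilter(c, 2083)
    c.Visit("http://httpbin.org/")
}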
POSTing a multipart form
func generateFormData() map[string][]byte {
    f, _ := os.Open("gocolly.jpg")
    defer f.Close()
    imgData, _ := ioutil.ReadAll(f)
    return map[string][]byte{
        "firstname": []byte("one"),
        "lastname":  []byte("two"),
        "email":     []byte("onetwo@example.com"),
        "file":      imgData,
    }
}

c.PostMultipart("http://localhost:8080/", generateFormData())
POSTing JSON
c.OnRequest(func(r *colly.Request) {
    r.Headers.Set("Content-Type", "application/json;charset=UTF-8")
})
// amac_list_post_url and page come from the surrounding project
c.PostRaw(fmt.Sprintf(amac_list_post_url, page), []byte("{}"))
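A self-contained sketch of the same pattern; the target URL and JSON payload here are placeholders:

package main

import (
    "fmt"

    "github.com/gocolly/colly"
)

func main() {
    c := colly.NewCollector()
    c.OnRequest(func(r *colly.Request) {
        r.Headers.Set("Content-Type", "application/json;charset=UTF-8")
    })
    c.OnResponse(func(r *colly.Response) {
        fmt.Println(string(r.Body))
    })
    // PostRaw sends the raw byte slice as the request body
    c.PostRaw("http://httpbin.org/post", []byte(`{"page": 1}`))
}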
Queue
package main

import (
    "fmt"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/queue"
)

func main() {
    url := "https://httpbin.org/delay/1"

    // Instantiate default collector
    c := colly.NewCollector()

    // create a request queue with 2 consumer threads
    q, _ := queue.New(
        2, // Number of consumer threads
        &queue.InMemoryQueueStorage{MaxSize: 10000}, // Use default queue storage
    )

    c.OnRequest(func(r *colly.Request) {
        fmt.Println("visiting", r.URL)
    })

    for i := 0; i < 5; i++ {
        // Add URLs to the queue
        q.AddURL(fmt.Sprintf("%s?n=%d", url, i))
    }
    // Consume URLs
    q.Run(c)
}
Random delay
package main

import (
    "fmt"
    "time"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/debug"
)

func main() {
    url := "https://httpbin.org/delay/2"

    // Instantiate default collector
    c := colly.NewCollector(
        // Attach a debugger to the collector
        colly.Debugger(&debug.LogDebugger{}),
        colly.Async(true),
    )

    // Limit the number of threads started by colly to two
    // when visiting links whose domains match the "*httpbin.*" glob
    c.Limit(&colly.LimitRule{
        DomainGlob:  "*httpbin.*",
        Parallelism: 2,
        RandomDelay: 5 * time.Second,
    })

    // Start scraping in four threads on https://httpbin.org/delay/2
    for i := 0; i < 4; i++ {
        c.Visit(fmt.Sprintf("%s?n=%d", url, i))
    }
    // Start scraping on https://httpbin.org/delay/2
    c.Visit(url)
    // Wait until threads are finished
    c.Wait()
}
Limiting parallelism
package main

import (
    "fmt"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/debug"
)

func main() {
    url := "https://httpbin.org/delay/2"

    // Instantiate default collector
    c := colly.NewCollector(
        // Turn on asynchronous requests
        colly.Async(true),
        // Attach a debugger to the collector
        colly.Debugger(&debug.LogDebugger{}),
    )

    // Limit the number of threads started by colly to two
    // when visiting links whose domains match the "*httpbin.*" glob
    c.Limit(&colly.LimitRule{
        DomainGlob:  "*httpbin.*",
        Parallelism: 2,
        //Delay:     5 * time.Second,
    })

    // Start scraping in five threads on https://httpbin.org/delay/2
    for i := 0; i < 5; i++ {
        c.Visit(fmt.Sprintf("%s?n=%d", url, i))
    }
    // Wait until threads are finished
    c.Wait()
}
Using Redis as the queue storage
package main

import (
    "log"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/queue"
    "github.com/gocolly/redisstorage"
)

func main() {
    urls := []string{
        "http://httpbin.org/",
        "http://httpbin.org/ip",
        "http://httpbin.org/cookies/set?a=b&c=d",
        "http://httpbin.org/cookies",
    }

    c := colly.NewCollector()

    // create the redis storage
    storage := &redisstorage.Storage{
        Address:  "127.0.0.1:6379",
        Password: "",
        DB:       0,
        Prefix:   "httpbin_test",
    }

    // add storage to the collector
    err := c.SetStorage(storage)
    if err != nil {
        panic(err)
    }

    // delete previous data from storage
    if err := storage.Clear(); err != nil {
        log.Fatal(err)
    }

    // close redis client
    defer storage.Client.Close()

    // create a new request queue with redis storage backend
    q, _ := queue.New(2, storage)

    c.OnResponse(func(r *colly.Response) {
        log.Println("Cookies:", c.Cookies(r.Request.URL.String()))
    })

    // add URLs to the queue
    for _, u := range urls {
        q.AddURL(u)
    }
    // consume requests
    q.Run(c)
}
Context management
package main

import (
    "fmt"

    "github.com/gocolly/colly"
)

func main() {
    // Instantiate default collector
    c := colly.NewCollector()

    // Before making a request put the URL with
    // the key of "url" into the context of the request
    c.OnRequest(func(r *colly.Request) {
        r.Ctx.Put("url", r.URL.String())
    })

    // After making a request get "url" from
    // the context of the request
    c.OnResponse(func(r *colly.Response) {
        fmt.Println(r.Ctx.Get("url"))
    })

    // Start scraping on https://en.wikipedia.org
    c.Visit("https://en.wikipedia.org/")
}
Scraper server
package main

import (
    "encoding/json"
    "log"
    "net/http"

    "github.com/gocolly/colly"
)

type pageInfo struct {
    StatusCode int
    Links      map[string]int
}

func handler(w http.ResponseWriter, r *http.Request) {
    URL := r.URL.Query().Get("url")
    if URL == "" {
        log.Println("missing URL argument")
        return
    }
    log.Println("visiting", URL)

    c := colly.NewCollector()
    p := &pageInfo{Links: make(map[string]int)}

    // count links
    c.OnHTML("a[href]", func(e *colly.HTMLElement) {
        link := e.Request.AbsoluteURL(e.Attr("href"))
        if link != "" {
            p.Links[link]++
        }
    })

    // extract status code
    c.OnResponse(func(r *colly.Response) {
        log.Println("response received", r.StatusCode)
        p.StatusCode = r.StatusCode
    })
    c.OnError(func(r *colly.Response, err error) {
        log.Println("error:", r.StatusCode, err)
        p.StatusCode = r.StatusCode
    })

    c.Visit(URL)

    // dump results
    b, err := json.Marshal(p)
    if err != nil {
        log.Println("failed to serialize response:", err)
        return
    }
    w.Header().Add("Content-Type", "application/json")
    w.Write(b)
}

func main() {
    // example usage: curl -s 'http://127.0.0.1:7171/?url=http://go-colly.org/'
    addr := ":7171"

    http.HandleFunc("/", handler)
    log.Println("listening on", addr)
    log.Fatal(http.ListenAndServe(addr, nil))
}
URL filtering with regular expressions
package main

import (
    "fmt"
    "regexp"

    "github.com/gocolly/colly"
)

func main() {
    // Instantiate default collector
    c := colly.NewCollector(
        // Visit only the root URL and URLs which start with "e" or "h" on httpbin.org
        colly.URLFilters(
            regexp.MustCompile("http://httpbin\\.org/(|e.+)$"),
            regexp.MustCompile("http://httpbin\\.org/h.+"),
        ),
    )

    // On every a element which has an href attribute call the callback
    c.OnHTML("a[href]", func(e *colly.HTMLElement) {
        link := e.Attr("href")
        // Print link
        fmt.Printf("Link found: %q -> %s\n", e.Text, link)
        // Visit link found on page
        // Only those links are visited which are matched by any of the URLFilter regexps
        c.Visit(e.Request.AbsoluteURL(link))
    })

    // Before making a request print "Visiting ..."
    c.OnRequest(func(r *colly.Request) {
        fmt.Println("Visiting", r.URL.String())
    })

    // Start scraping on http://httpbin.org
    c.Visit("http://httpbin.org/")
}