离签约作者的路——谈写作数据分析

绘制你的简书曲线

2016-11-10  本文已影响216人  treelake

上次看到 彭小六大神 弄了个他自己的简书文章-关注数曲线,图如下


Scrapy爬虫

# -*- coding: utf-8 -*-
import scrapy
# Run
# scrapy runspider num2.py -o 2.json

yourcookie = 'xxxxxxxxx' # The short `remember_user_token` value; obtain it from the browser's F12 devtools cookie view

class Num1Spider(scrapy.Spider):
    """Scrape the author's Jianshu notification and latest-article pages.

    Emits one dict per event in the uniform shape
    ``{'time': <timestamp string>, 'token': 'heart'|'check'|'article'}``
    so the downstream JSON post-processing can count events per day.
    """

    name = "num2"
    allowed_domains = ["jianshu.com"]

    # Page-templated URLs; `%d` is filled with the page number.
    info_url = 'http://www.jianshu.com/notifications?all=true&page=%d'
    article_url = 'http://www.jianshu.com/users/66f24f2c0f36/latest_articles?page=%d'

    # Auth cookie — only the remember_user_token fragment is needed.
    Cookie = {
            'remember_user_token' : yourcookie
            }

    headers = {
        "User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36",
    }

    num = 0  # current page counter

    def start_requests(self):
        """Default entry point: yield requests for pages 1..30 of both views."""
        # 30 pages is enough for this account (whole crawl took ~2 seconds).
        for _ in range(30):
            self.num += 1
            # One request for the notifications page, one for the articles page.
            for url, handler in ((self.info_url, self.parse),
                                 (self.article_url, self.article_parse)):
                yield scrapy.Request(url % self.num,
                                     headers = self.headers,
                                     cookies = self.Cookie,
                                     callback = handler)

    def parse(self, response):
        """Turn notification rows into heart/check events.

        A 'fa-heart' icon marks a like, 'fa-check' marks a new follower;
        any other icon type is ignored.
        """
        timestamps = response.css('li .time::text').extract()
        icons = response.css('li span + i').extract()
        for when, icon in zip(timestamps, icons):
            if 'fa-heart' in icon:
                yield {'time': when, 'token': 'heart'}
            elif 'fa-check' in icon:
                yield {'time': when, 'token': 'check'}

    def article_parse(self, response):
        """Turn the latest-articles page into publish-time events."""
        # Debugging helper, kept for reference:
        # from scrapy.shell import inspect_response
        # inspect_response(response, self)
        for stamp in response.css('.time::attr("data-shared-at")').extract():
            if not stamp:
                break
            # Normalize ISO 'T' separator to a space to match the other events.
            yield {'time': stamp.replace('T', ' '), 'token': 'article'}

数据后处理

# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import seaborn as sns 
#一旦导入了seaborn,matplotlib的默认作图风格就会被覆盖成seaborn的格式
import json
from collections import defaultdict
import datetime

# Per-day event counters; defaultdict(int) means a missing date counts as 0.
like = defaultdict(int)     # date -> likes received that day
focus = defaultdict(int)    # date -> new followers that day
article = defaultdict(int)  # date -> articles published that day

# Parse the scraped JSON and tally likes, follows and articles per day.
# JSON is UTF-8 by specification, so make the encoding explicit instead of
# relying on the platform default.
with open('2.json', 'r', encoding='utf-8') as f:
    data = json.load(f)
    for item in data:
        day = item['time'].split(' ')[0]  # keep only the 'YYYY-MM-DD' part
        if item['token'] == 'heart':
            like[day] += 1
        elif item['token'] == 'check':
            focus[day] += 1
        elif item['token'] == 'article':
            article[day] += 1

# Plot each series with its own style: likes solid blue, follows dashed red,
# articles as green dots. (The original reused the name `i` for the loop
# variable, the sorted list AND the comprehension variable — renamed for
# clarity; behavior is unchanged.)
for counts, style in zip([like, focus, article], ['b-', 'r--', 'go']):
    # Tuples sort by their first element, so the (date, count) pairs come
    # out in chronological order without an explicit key function.
    points = sorted(counts.items())
    # Convert '2016-10-22'-style strings to datetime objects for the x axis.
    xs = [datetime.datetime.strptime(day, '%Y-%m-%d') for day, _ in points]
    ys = [count for _, count in points]
    plt.plot_date(xs, ys, style)

plt.savefig('2.jpg', dpi=300)  # save at a reasonably high resolution
plt.show()
上一篇下一篇

猜你喜欢

热点阅读