Python crawler: Zhihu user data

First, write a few helper functions in a file named getZhihuInfo.py:

import requests
from bs4 import BeautifulSoup
import json

headers = {
    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding':'gzip, deflate',
    'Accept-Language':'zh-CN,zh;q=0.8',
    'Connection': 'keep-alive',
    'Cache-Control':'max-age=0',
    'Cookie':'',    # paste your own Zhihu cookie here
    'Host': 'www.zhihu.com',
    'Referer': 'https://www.zhihu.com/people',
    'User-Agent':'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36',
}
headers_post = {
    'Accept':'*/*',
    'Accept-Encoding':'gzip, deflate',
    'Accept-Language':'zh-CN,zh;q=0.8',
    'Connection':'keep-alive',
    # Content-Length is omitted; requests computes it from the form body
    'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
    'Referer':'https://www.zhihu.com/people',
    'User-Agent':'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36',
    'Cookie':'',    # paste your own Zhihu cookie here
    'Host':'www.zhihu.com',
    'Origin':'https://www.zhihu.com',
    'X-Requested-With':'XMLHttpRequest',
    'X-Xsrftoken':'82f9b2f5e3166156c04eeb491ac6f21e'   # replace with your own xsrf token
}

# Return the text of the first matched element, or 'Unknown' if the selection is empty
def setValue(soupS):
    if soupS:
        return soupS[0].get_text()
    else:
        return 'Unknown'

# Fetch the basic profile info for one user URL; returns a list
def getBasicInfo(peopleUrl):
    wb_data = requests.get(peopleUrl,headers = headers)
    soup = BeautifulSoup(wb_data.text,'lxml')

    name = soup.select('div.title-section > span')[0].get_text()

    alocation = soup.select('span.location.item')
    abusiness = soup.select('span.business.item')
    agender = soup.select('span.item.gender > i')
    aemployment = soup.select('span.employment.item')
    aposition = soup.select('span.position.item')
    aeducation = soup.select('span.education.item')
    aeducation_extra = soup.select('span.education-extra.item')

    location = setValue(alocation)
    business = setValue(abusiness)
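    # the gender icon's class is e.g. 'icon-profile-male'; [13:] strips the 'icon-profile-' prefix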
    if agender:
        gender = agender[0].get('class')[1][13:]
    else:
        gender = 'Unknown'
    employment = setValue(aemployment)
    position = setValue(aposition)
    education = setValue(aeducation)
    education_extra = setValue(aeducation_extra)

    agree = soup.select('span.zm-profile-header-user-agree > strong')[0].get_text()
    thanks = soup.select('span.zm-profile-header-user-thanks > strong')[0].get_text()
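    # the span.num counters: asks, answers, posts, plus collections and logs when shown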
    action5 = soup.select('span.num')
    asks = action5[0].get_text()
    answers = action5[1].get_text()
    posts = action5[2].get_text()
    if len(action5) > 3:
        collections = action5[3].get_text()
        logs = action5[4].get_text()
    else:
        collections = 'Null'
        logs = 'Null'

    followees = soup.select('a.item > strong')[0].get_text()
    followers = soup.select('a.item > strong')[1].get_text()

    # followed columns (zl) and topics (ht); [:-3] strips the trailing unit label
    focus2 = soup.select('div.zm-profile-side-section-title > a > strong')
    if len(focus2) == 2:
        zl = focus2[0].get_text()[:-3]
        ht = focus2[1].get_text()[:-3]
    else:
        ht = focus2[0].get_text()[:-3]
        zl = '0'
    basicInfoSet = [name,location,business,gender,employment,position,education,education_extra,agree,thanks,asks,answers,posts,collections,logs,followees,followers,zl,ht]
    return basicInfoSet

# Get the profile URLs of the users this user follows (only those in the initial page HTML); returns a list
def getFolloweesUrl(OneUrl):
    url = OneUrl + '/followees'
    wb_data = requests.get(url,headers = headers)
    soup = BeautifulSoup(wb_data.text,'lxml')
    alist = soup.select('a.zg-link.author-link')
    followeeUrlSet = []
    for i in alist:
        followeeUrlSet.append(i.get('href'))
    return followeeUrlSet   # [] if no followees were found

# Get a user's ~20 most recent activities before a given date; returns a dict mapping timestamp -> activity type
def postActivitiesByDate(Purl,byDate):
    url = Purl + '/activities'
    data = {
        'start': byDate    # Unix timestamp; only activities before this time are returned
    }
    wb_data = requests.post(url,headers = headers_post,data = data)
    # the JSON response's 'msg' array carries the rendered activity list as an HTML fragment
    soup = BeautifulSoup(wb_data.json()['msg'][1], 'lxml')
    activities = soup.select('div.zm-profile-section-item.zm-item.clearfix')
    actdata = {}
    for i in activities:
        actdata[i.get('data-time')] = i.get('data-type-detail')
    return actdata
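
Before wiring these functions into the crawl, it helps to smoke-test them on a single profile. A minimal sketch, assuming a placeholder profile URL (swap in a real one) and the same 2016-09-09 timestamp used further below:

from getZhihuInfo import getBasicInfo, getFolloweesUrl, postActivitiesByDate

testUrl = 'https://www.zhihu.com/people/some-user'   # placeholder; use a real profile URL

basic = getBasicInfo(testUrl)
print(basic[0], basic[8], basic[9])     # name, agree count, thanks count

followees = getFolloweesUrl(testUrl)
print(len(followees), 'followee URLs found')

activities = postActivitiesByDate(testUrl, 1473379200)   # activities before 2016-09-09
for t, kind in activities.items():
    print(t, kind)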

Next, collect a batch of Zhihu profile URLs by walking each user's followees, and store them in MongoDB:

from getZhihuInfo import getFolloweesUrl
import pymongo

client = pymongo.MongoClient('localhost',27017)
zhiHu = client['zhiHu']
zhiHuId = zhiHu['zhiHuId']

# Seed URL: put the URL of one Zhihu profile page here to start from
urlSet = ['']

# Initial setup (uncomment and run once): clear the collection and seed it with id 0
# zhiHuId.remove()
# fd1 = {
#     'id':0,
#     'followees':urlSet
# }
# zhiHuId.insert_one(fd1)

begin = 0   # id to start from (0 on the first run)
end = 1000
dbId = 0   # largest id already written to the collection
for k in range(begin,end):
    record = zhiHuId.find_one({'id': k})
    if not record or not record['followees']:
        continue    # skip missing ids and users with no followees
    for i in record['followees']:
        followees = getFolloweesUrl(i)
        dbId += 1
        fd = {
            'id':dbId,
            'followees':followees
        }
        zhiHuId.insert_one(fd)
        print(dbId)
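
To sanity-check the seeding run, you can query the zhiHuId collection directly. A small sketch (collection names match the script above; count_documents needs pymongo 3.7+, older versions can use count()):

import pymongo

client = pymongo.MongoClient('localhost', 27017)
zhiHuId = client['zhiHu']['zhiHuId']

# how many followee batches have been written so far
print(zhiHuId.count_documents({}))

# peek at one record to confirm the {'id': ..., 'followees': [...]} shape
print(zhiHuId.find_one({'id': 1}))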

Then, pull each URL out of the database, fetch that user's details, and store them in another collection:

from getZhihuInfo import setValue,getBasicInfo,postActivitiesByDate

import pymongo
import time


client = pymongo.MongoClient('localhost',27017)
zhiHu = client['zhiHu']
zhiHuId = zhiHu['zhiHuId']
zhiHuDetail = zhiHu['zhiHuDetail']

# Seed URL
OneUrl = ''
# Cutoff date for the activities to fetch, as a Unix timestamp (2016-09-09 here)
byDate160909 = 1473379200

begin = 0   # id to start from (0 on the first run)
end = 1000
count = 0
for k in range(begin,end):
    record = zhiHuId.find_one({'id': k})
    if record and record['followees']:
        for i in record['followees']:
            y = getBasicInfo(i)
            z = postActivitiesByDate(i,byDate160909)

            oneData = {
                'name':y[0],'location':y[1],'business':y[2],
                'gender':y[3], 'employment':y[4], 'position':y[5], 'education':y[6],
                'education_extra':y[7], 'agree':y[8],'thanks':y[9], 'asks':y[10],
                'answers':y[11], 'posts':y[12], 'collections':y[13], 'logs':y[14], 'followees':y[15],
                'followers':y[16], 'zl':y[17], 'ht':y[18],
                'activities':z
            }
            zhiHuDetail.insert_one(oneData)
            count += 1
            print(k,'----',count)
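
Once zhiHuDetail has data in it, the stored fields map directly onto simple MongoDB aggregations. A sketch of a gender breakdown, assuming the same local database as above:

import pymongo

client = pymongo.MongoClient('localhost', 27017)
zhiHuDetail = client['zhiHu']['zhiHuDetail']

# count users by the 'gender' field ('male', 'female', or 'Unknown')
pipeline = [
    {'$group': {'_id': '$gender', 'n': {'$sum': 1}}},
    {'$sort': {'n': -1}},
]
for row in zhiHuDetail.aggregate(pipeline):
    print(row['_id'], row['n'])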