This is day two of learning Python.
A project needs it, so I have to ramp up fast.
Yesterday I skimmed the syntax and watched some Python web-scraping videos; today I start for real.
First step: read someone else's code and add my own comments.
# Imports
import json
from multiprocessing import Pool
import requests
from requests.exceptions import RequestException
import re

# Fetch a page and return its HTML source
def get_one_page(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:  # request succeeded
            return response.text
        return None
    except RequestException:
        return None
# Parse the page with a regular expression
def parse_one_page(html):
    pattern = re.compile(r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
                         r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
                         r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],   # drop the leading "主演:" label
            'time': item[4].strip()[5:],    # drop the leading "上映时间:" label
            'score': item[5] + item[6]      # integer part + fraction part
        }
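# A yielded item should look roughly like this (illustrative values, not real output):
# {'index': '1', 'image': 'http://.../poster.jpg', 'title': '霸王别姬',
#  'actor': '张国荣,张丰毅,巩俐', 'time': '1993-01-01', 'score': '9.6'}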
# Append a record to a file, one JSON object per line
def write_to_file(content):
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
        # no explicit f.close() needed: the with block closes the file
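# To read the results back later, parse each line separately, e.g.:
# with open('result.txt', encoding='utf-8') as f:
#     records = [json.loads(line) for line in f]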
def main(offset):
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)

if __name__ == '__main__':
    pool = Pool()
    pool.map(main, [i * 10 for i in range(10)])  # offsets 0, 10, ..., 90
    pool.close()
    pool.join()
'''
https://www.zhihu.com/question/49136398 explains this statement in detail, with sample programs.
Note the line if __name__ == '__main__': it guarantees that the code below it
does not run when the module is loaded via import.
'''
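To see the guard in action, here is a minimal sketch of my own (the file name and function are made up for illustration) -- save it as demo_pool.py and run it directly:

# demo_pool.py -- minimal sketch of the __main__ guard with multiprocessing
from multiprocessing import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    # Without this guard, on platforms that start workers by re-importing the
    # module (e.g. Windows), creating the Pool at import time would recurse.
    pool = Pool()
    print(pool.map(square, [1, 2, 3]))  # prints [1, 4, 9]
    pool.close()
    pool.join()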
Today's task: crawl the listing pages, then crawl the detail content one level down.
# Imports
import json
from multiprocessing import Pool
import requests
from requests.exceptions import RequestException
import re
import pymysql

# Database connection settings
host = '127.0.0.1'
username = 'root'
password = '123456'
database = 'heldum'
# Test the database connection
def testconnect():
    # Open the database connection (pymysql 1.0+ requires keyword arguments)
    db = pymysql.connect(host=host, user=username, password=password, database=database)
    # Create a cursor object with cursor()
    cursor = db.cursor()
    # Run a SQL query with execute()
    cursor.execute("select version()")
    # Fetch a single row with fetchone()
    data = cursor.fetchone()
    print(data)
    db.close()
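# Calling testconnect() should print the server version as a one-element tuple,
# e.g. something like ('5.7.26',) -- the exact value depends on your MySQL install.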
def InsertData(sql, item):
    # Open the database connection
    db = pymysql.connect(host=host, user=username, password=password,
                         database=database, charset='utf8')
    # Create a cursor object with cursor()
    cursor = db.cursor()
    # Sample insert statement:
    # sql = "insert into gtzy(lm) VALUES ('aaaa')"
    try:
        # Execute the parameterized insert; the detail page is fetched and
        # stripped down to plain text on the fly for the content column
        cursor.execute(sql, (item['url'], item['lm'], item['title'], item['date'],
                             parse_one_page_content(get_one_page(item['url']))))
        db.commit()
    except UnicodeEncodeError as e:
        # Roll back if the row cannot be encoded
        print(e)
        db.rollback()
    db.close()
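# Note: passing the values as the second argument to cursor.execute() lets the
# driver escape them, instead of splicing strings into the SQL yourself -- this
# avoids quoting bugs and SQL injection. A quick illustration:
# cursor.execute("insert into gtzy(lm) VALUES (%s)", ("it's quoted safely",))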
# Request headers to send with every fetch
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;",
    "Accept-Encoding": "gzip",
    "Accept-Language": "zh-CN,zh;q=0.8",
    "Referer": "http://www.example.com/",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36"
}
# Main function first
def main(page_num):
    # The real site is masked here; 'url?key=%&type=0&page=' is a placeholder
    url = 'url?key=%&type=0&page=' + str(page_num)
    html = get_one_page(url)
    for item in parse_one_page(html):
        print(str(page_num))
        InsertData("insert into gtzy(url,lm,title,date,content) VALUES (%s,%s,%s,%s,%s)", item)
        # write_to_file(item)
# Extract the main body text from a detail page
def parse_one_page_content(html):
    # The original regex was blanked out when the post was published; placeholder only
    pattern = re.compile(' ', re.S)
    items = re.findall(pattern, html)
    dr = re.compile(r'<[^>]+>', re.S)  # matches any HTML tag
    dd = dr.sub('', items[0])          # strip the tags, keep the text
    return dd
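# The tag-stripping regex on its own, for example:
# re.sub(r'<[^>]+>', '', '<p>Hello <b>world</b></p>') returns 'Hello world'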
# Fetch a page and return its HTML source
def get_one_page(url):
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:  # request succeeded
            return response.text
        return None
    except RequestException:
        return None
'''
What to do when the server answers 403 and refuses to serve the crawler: add request headers.
'''
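# A quick check of the idea (sketch only): the same URL that comes back 403
# when fetched bare often returns 200 once a browser-like User-Agent is sent.
# r = requests.get(url)                    # may come back as 403
# r = requests.get(url, headers=headers)   # typically 200 after adding headers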
# Parse the listing page with a regular expression
def parse_one_page(html):
    # As above, the original regex did not survive publishing; placeholder only
    pattern = re.compile('', re.S)
    items = re.findall(pattern, html)
    # print(items)
    for item in items:
        yield {
            'url': item[0],
            'title': item[1],
            'lm': item[2],
            'date': item[3]
        }
# Append a record to a file, one JSON object per line
def write_to_file(content):
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
if __name__ == '__main__':
    # InsertData("insert into gtzy(url,lm,title,date,content) " + "VALUES ('" + username + "','aaaa','aaaa','aaaa','aaaa')")
    pool = Pool()
    pool.map(main, [i - 1 for i in range(5891)])  # page numbers -1 .. 5889
    pool.close()
    pool.join()
And in the end, it worked.