# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import re
import urllib2
import urllib
import time
from urlparse import *
def get_title_from_uri(uri):
    import mechanize
    import cookielib
    br = mechanize.Browser()
    br.set_cookiejar(cookielib.LWPCookieJar())  # Cookie jar
    br.set_handle_equiv(True)                   # Browser options
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_refresh(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    print br.open(uri)
    print br.title()
    #return br.title()
#get_title_from_uri('http://www.example.gov.cn')
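# A minimal usage sketch (assumption: mechanize is installed and the target
# site is reachable); the URL below is only illustrative:
#
#   try:
#       get_title_from_uri('http://www.example.com')
#   except Exception as e:
#       print 'mechanize fetch failed:', str(e)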
import urllib2
from BeautifulSoup import BeautifulSoup
def bs_title(url):
    """Fetch a page with urllib2 and return its <title> via BeautifulSoup."""
    try:
        html = urllib2.urlopen(url).read()
        soup = BeautifulSoup(html)
        #encoding = str(chardet.detect(html)['encoding'])
        #if encoding == 'GB2312':
        #    soup = BeautifulSoup(html, fromEncoding="GB18030")
        #else:
        #    soup = BeautifulSoup(html, fromEncoding=encoding)
        #print url.strip() + ':' + str(encoding) + ':' + soup.title.string
        return str(soup.title.string)
    except Exception as e:
        print str(e)

print bs_title('http://www.xx.com')
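# The commented-out lines above hint at detecting the page encoding with chardet
# before parsing. A sketch of that variant (hypothetical helper name; assumes
# chardet and BeautifulSoup 3 are installed):
def bs_title_with_encoding(url):
    import chardet
    html = urllib2.urlopen(url).read()
    encoding = str(chardet.detect(html)['encoding'])
    if encoding == 'GB2312':
        # GB18030 is a superset of GB2312, so it decodes GB2312 pages safely
        soup = BeautifulSoup(html, fromEncoding="GB18030")
    else:
        soup = BeautifulSoup(html, fromEncoding=encoding)
    return soup.title.string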
import urllib2
import chardet
import requests
import re
import urllib
from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from libs.Conn_scan import Mongo,get_request
from BeautifulSoup import BeautifulSoup
class title(object):
    """Resolve the title of a URL and record the result in MongoDB."""
    #----------------------------------------------------------------------
    def __init__(self, url):
        """Constructor"""
        super(title, self).__init__()
        self.url = url
    @staticmethod
    def utf8_transfer(strs):
        '''
        Convert a string to utf-8 encoding.
        '''
        try:
            if isinstance(strs, unicode):
                strs = strs.encode('utf-8')
            elif chardet.detect(strs)['encoding'] == 'GB2312':
                strs = strs.decode("gb2312", 'ignore').encode('utf-8')
            elif chardet.detect(strs)['encoding'] == 'utf-8':
                strs = strs.decode('utf-8', 'ignore').encode('utf-8')
        except Exception as e:
            print 'utf8_transfer error', strs, e
        return strs
    @staticmethod
    def title_xpath(Html):
        '''
        Extract the page title with xpath.
        '''
        Html = title.utf8_transfer(Html)
        Html_encoding = chardet.detect(Html)['encoding']
        page = etree.HTML(Html, parser=etree.HTMLParser(encoding=Html_encoding))
        title_text = page.xpath('/html/head/title/text()')
        try:
            title_text = title_text[0].strip()
        except IndexError:
            print 'Nothing'
            title_text = ''
        return title_text
    @staticmethod
    def title_re(Html):
        '''
        Extract the page title with a regular expression.
        '''
        Html = title.utf8_transfer(Html)
        compile_rule = r'<title>.*</title>'
        title_list = re.findall(compile_rule, Html)
        if not title_list:
            title_text = ''
        else:
            title_text = title_list[0][7:-8]
        return title_text
    #----------------------------------------------------------------------
    def phJS_title(self):
        """Render the page with PhantomJS and return its title."""
        try:
            dcap = dict(DesiredCapabilities.PHANTOMJS)
            dcap["phantomjs.page.settings.userAgent"] = (
                "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.23 Mobile Safari/537.36"
            )
            driver = webdriver.PhantomJS(desired_capabilities=dcap)
            driver.get(self.url)
            title_text = driver.title
            driver.quit()
            return title_text
        except Exception as e:
            print str(e)
            Mongo.coll['title'].update({"URL": self.url},
                                       {"$set": {'title': 'Null'}},
                                       upsert=True)
            Mongo.coll['Task'].update({"URL": self.url},
                                      {"$set": {'title': '0'}},
                                      upsert=True)
    #----------------------------------------------------------------------
    def run(self):
        """Fetch the title and store the result in MongoDB."""
        title_text = self.phJS_title()
        if not title_text:
            # phJS_title() already recorded the failure in MongoDB
            return
        print title_text
        Mongo.coll['title'].update({"URL": self.url},
                                   {"$set": {'title': title_text}},
                                   upsert=True)
        Mongo.coll['Task'].update({"URL": self.url},
                                  {"$set": {'title': '1'}},
                                  upsert=True)
#----------------------------------------------------------------------
def get_title(url):
    """Convenience wrapper: build a title() object and run it."""
    title_url = title(url)
    title_url.run()
#get_title('http://www.baidu.com')
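# The static helpers can also be used on their own; a minimal sketch, assuming
# the page is reachable and using requests (imported above but otherwise unused
# here) to fetch the raw HTML:
#
#   resp = requests.get('http://www.example.com', timeout=10)
#   html = resp.content
#   print title.title_xpath(html)   # xpath-based extraction
#   print title.title_re(html)      # regex-based extraction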