# -*- coding: utf8 -*-

import urllib
import http.cookiejar
import re
import bs4
URL_BAIDU_INDEX = u'http://www.baidu.com/'
# https://passport.baidu.com/v2/api/?getapi&class=login&tpl=mn&tangram=true also works for the token
URL_BAIDU_TOKEN = 'https://passport.baidu.com/v2/api/?getapi&tpl=pp&apiver=v3&class=login'
URL_BAIDU_LOGIN = 'https://passport.baidu.com/v2/api/?login'
# Account credentials.
# NOTE(review): credentials are hardcoded in source — move them to an
# environment variable or config file before sharing/committing this script.
username = 'skyclub66'
password = '1987913tjl'
# Install a cookie-aware opener: the CookieJar manages session cookies
# automatically, so no manual cookie handling is needed anywhere below.
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
urllib.request.install_opener(opener)
# Hit the index page first so baidu.com can set its initial cookies.
# Use `with` so the HTTP response is always closed (the original leaked it).
with urllib.request.urlopen(URL_BAIDU_INDEX) as reqReturn:
    reqReturn.read()
# Fetch the page that embeds the login token as `"token" : "..."`.
with urllib.request.urlopen(URL_BAIDU_TOKEN) as tokenReturn:
    html = tokenReturn.read()
matchVal = re.search(r'"token" : "(?P<tokenVal>.*?)"', html.decode())
# Fail with a clear message instead of an AttributeError on `.group` if the
# page layout changed and the token pattern no longer matches.
if matchVal is None:
    raise RuntimeError('login token not found in token page response')
tokenVal = matchVal.group('tokenVal')
#构造登录请求参数，该请求数据是通过抓包获得，对应https://passport.baidu.com/v2/api/?login请求
# Login form fields, captured from a real browser session; this payload is
# POSTed to https://passport.baidu.com/v2/api/?login.
postData = urllib.parse.urlencode({
    'username': username,
    'password': password,
    'u': 'https://passport.baidu.com/',
    'tpl': 'pp',
    'token': tokenVal,
    'staticpage': 'https://passport.baidu.com/static/passpc-account/html/v3Jump.html',
    'isPhone': 'false',
    'charset': 'UTF-8',
    'callback': 'parent.bd__pcbs__ra48vi',
}).encode(encoding='UTF8')
# Send the login request. The original header values were malformed —
# every q-weight was missing its ';' separator (e.g. 'application/xmlq=0.9'),
# and the User-Agent lacked the ';' in 'Windows NT 6.1; WOW64' — fixed here.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    # Ask for an uncompressed body: this script never decompresses responses,
    # so advertising gzip/deflate (as the original did) could hand compressed
    # bytes to the later .decode() and crash.
    'Accept-Encoding': 'identity',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'Content-Type': 'application/x-www-form-urlencoded',
}
loginRequest = urllib.request.Request(URL_BAIDU_LOGIN, postData, headers)
# The installed cookie-aware opener stores the session cookies from this POST.
sendPost = urllib.request.urlopen(loginRequest)
# Verify the login by fetching a Tieba page that requires the session.
# http://tieba.baidu.com/home/main?un=XXXX&fr=index is the personal homepage;
# cookies are attached automatically by the installed opener.
teibaUrl = 'http://tieba.baidu.com/f/like/furank?kw=%E6%96%B0%E5%AE%8B&ie=utf-8&pn=2'
with urllib.request.urlopen(teibaUrl) as resp:
    # Decode once and keep the result as str. The original did
    # .decode('gbk').encode('utf8'), handing bytes back to BeautifulSoup and
    # forcing it to re-guess the charset.
    # NOTE(review): the page appears to be gbk-encoded even though the URL
    # says ie=utf-8 — confirm against a live response.
    content = resp.read().decode('gbk')
# Parse the member names out of the ranking list.
soup = bs4.BeautifulSoup(content, "html.parser")
namelist = soup.find_all(attrs={'class': 'drl_item_name'})
for item in namelist:
    print(item.text)