import urllib.request
import urllib.parse
import http.cookiejar
import time
import re
from bs4 import BeautifulSoup
import json
import bs4
import requests


# Module-wide cookie machinery: every request opened through `opener`
# automatically sends and stores cookies held in `cookiejar`.
# (`chandle` is the processor wiring the jar into the opener chain.)
cookiejar = http.cookiejar.CookieJar()
chandle = urllib.request.HTTPCookieProcessor(cookiejar)
opener = urllib.request.build_opener(chandle)


def make_cookie(domain, name, value, path="/", secure=False, expires=None):
    """Build an ``http.cookiejar.Cookie`` suitable for ``CookieJar.set_cookie``.

    Args:
        domain: Cookie domain, e.g. ``".baidu.com"``.
        name: Cookie name.
        value: Cookie value.
        path: Cookie path (defaults to site root).
        secure: Restrict the cookie to HTTPS when True.
        expires: Unix timestamp of expiry, or ``None`` for a session cookie.

    Returns:
        A version-0 ``Cookie`` instance.
    """
    return http.cookiejar.Cookie(
        version=0,
        name=name,
        value=value,
        port=None,
        port_specified=False,
        domain=domain,
        domain_specified=True,
        domain_initial_dot=domain.startswith("."),
        path=path,
        path_specified=True,
        secure=secure,
        expires=expires,
        discard=False,
        comment=None,
        comment_url=None,
        # BUGFIX: `rest` must be a dict, not None — CookieJar methods such as
        # has_nonstandard_attr() and LWPCookieJar.save() iterate/test it and
        # raise TypeError on None.
        rest={},
    )

# Inject the BDUSS session cookie so subsequent requests through `opener`
# are authenticated.  NOTE(review): this is a hard-coded credential checked
# into source — it should be loaded from an environment variable or secret
# store instead.
cookiejar.set_cookie(make_cookie(".baidu.com", "BDUSS", "NBVUdLU1pTVXVpUFlvR2E4eE82R2FHMUdubjhPLUNwM2pzbGRJZG9YWFF3bjlaSUFBQUFBJCQAAAAAAAAAAAEAAADvNnUwzqLQprXE0KHQobW2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANA1WFnQNVhZbz"))


def timeNow():
    """Return the current Unix time in whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))

def getData(url):
    """Fetch *url* through the cookie-aware `opener` and return the body as text.

    Decodes as UTF-8 first, falling back to GBK with undecodable bytes
    dropped, since Baidu endpoints serve either encoding.

    Args:
        url: Absolute URL to fetch.

    Returns:
        The response body decoded to ``str``.
    """
    response = opener.open(url)
    try:
        raw = response.read()
    finally:
        # BUGFIX: always release the connection (original leaked the handle).
        response.close()
    try:
        return raw.decode('utf-8')
    # BUGFIX: only catch decode failures — the original bare `except:`
    # swallowed every exception type, hiding real errors.
    except UnicodeDecodeError:
        return raw.decode('gbk', 'ignore')


#getData("http://www.baidu.com")



# Dump every cookie in the jar so we can eyeball that the hard-coded
# BDUSS cookie was registered (name on one line, value on the next).
for stored_cookie in cookiejar:
    print(stored_cookie.name)
    print(stored_cookie.value)


#print((getData('http://tieba.baidu.com/f/user/json_userinfo')))


# Session cookie passed to `requests` directly.  NOTE(review): same
# hard-coded BDUSS credential as above — should come from a secret store.
cookies = {
    "BDUSS": "NBVUdLU1pTVXVpUFlvR2E4eE82R2FHMUdubjhPLUNwM2pzbGRJZG9YWFF3bjlaSUFBQUFBJCQAAAAAAAAAAAEAAADvNnUwzqLQprXE0KHQobW2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANA1WFnQNVhZbz",
}

# Browser-like headers so the endpoint serves the normal response.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Host": "tieba.baidu.com",
    "Pragma": "no-cache",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
}

# Request the logged-in user's info JSON; a populated payload (rather than
# an anonymous stub) indicates the BDUSS cookie authenticated us.
datas = requests.get(
    'http://tieba.baidu.com/f/user/json_userinfo',
    cookies=cookies,
    headers=headers,
)
print(datas.text)
# View the Tieba personal home page to verify that login succeeded; since
# cookies are managed automatically, the handling here is much simpler.
# http://tieba.baidu.com/home/main?un=XXXX&fr=index is the Tieba personal
# home page — links to all of the user's information can be found there.
# teibaUrl = 'http://tieba.baidu.com/f/like/mylike?v=1387441831248'
# content = getData(teibaUrl)
# print(content)
# # 解析数据，用的BeautifulSoup4，感觉没有jsoup用的爽
# soup = BeautifulSoup(content,"lxml")
# li = soup.findAll('tr')
# print('贴吧链接\\t吧名\\t等级')
# print(li)
# li = li[1:len(li)]
# print(li)
# careTeibalist = []
# print('贴吧链接\\t吧名\\t等级')
# print(li)
# for elem in li:
#     soup1 = bs4.BeautifulSoup(str(elem))
#     print('http://tieba.baidu.com/'+soup1.find('a')['href']+'\\t'+soup1.find('a')['title']+'\\t' +
#           soup1.find('a',{'class','like_badge'})['title'])

