import codecs
import gzip
import http.cookiejar
import http.cookiejar  # duplicate import kept from the original file
import queue
import re
import threading
import time
import urllib.parse
import urllib.request
import zlib


def ungzip(data):
    """Gzip-decompress *data* if possible.

    Returns the decompressed bytes; if *data* is not a valid gzip
    stream (e.g. the server sent an uncompressed body), the original
    bytes are returned unchanged.
    """
    try:
        print('正在解压...')
        data = gzip.decompress(data)
        print('解压完毕...')
    except (OSError, EOFError, zlib.error):
        # Not gzip data, or a truncated/corrupted stream: fall through
        # and return the input untouched instead of crashing.
        # (The original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        print('解压失败')
    return data
def get_xsrf(data):
    """Extract the `_xsrf` anti-forgery token from a Zhihu HTML page.

    Returns the first token found. Raises IndexError when the page
    contains no `_xsrf` hidden input (same as the original behaviour).
    """
    # [^"]+ instead of the original greedy .+ — the greedy version
    # matched up to the LAST quote on the line, so any attribute after
    # value="..." corrupted the captured token. Raw string avoids
    # escape-sequence pitfalls in the pattern.
    cer = re.compile(r'name="_xsrf" value="([^"]+)"')
    strlist = cer.findall(data)
    return strlist[0]
def getOpener(head):
    """Build a urllib opener with an in-memory cookie jar and fixed headers.

    *head* maps header names to values; every request made through the
    returned opener carries these headers and shares one CookieJar, so
    the login session persists across calls.
    """
    jar = http.cookiejar.CookieJar()
    cookie_handler = urllib.request.HTTPCookieProcessor(jar)
    opener = urllib.request.build_opener(cookie_handler)
    opener.addheaders = [(name, value) for name, value in head.items()]
    return opener

def search_answer_url(data):
    """Collect all answer links from an HTML answer-listing page.

    Returns a set of unique relative URL paths of the form
    "/question/<id>/answer/<id>" (no host prefix).
    """
    # Raw string: the original non-raw '\s' is an invalid escape
    # sequence (DeprecationWarning, SyntaxError in future Pythons).
    # The pattern itself is unchanged. Dead commented-out queue code
    # that followed this function has been removed.
    find_url = re.compile(r'class="question_link"\shref="(/question/[0-9]+/answer/[0-9]+)"')
    return set(find_url.findall(data))
def do_work(url):
    """Fetch one answer page and extract the numeric answer id.

    Uses the module-level logged-in ``opener``. Returns the first regex
    match: a 2-tuple whose element [1] is the answer id string (element
    [0] is just the last character matched by the filler group — the
    caller only uses [1]). Raises IndexError if the page contains no
    match, e.g. when the hard-coded author name does not appear.
    """
    op=opener.open(url)
    data=op.read()
    data=ungzip(data)
    # NOTE(review): the author display name "机器人" is hard-coded in the
    # pattern — replace it with the target user's name before running.
    # (.|\n)+? is a non-greedy, newline-tolerant skip between the author
    # marker and the "<id>-comment" anchor that carries the answer id.
    find_ansid=re.compile('data-author-name=\"机器人\"(.|\n)+?class=\"zg-anchor-hidden ac\" name=\"([0-9]+)-comment\"',flags=0)# remember to substitute the user name
    ansid=find_ansid.findall(data.decode('utf8'))
    return ansid[0]
    
def get_into_everypage():
    """Worker loop: pull answer URLs from the shared queue and up-vote each.

    For every URL taken from the module-level ``que`` this fetches the
    page, extracts the answer id via do_work(), and POSTs a 'vote_up'
    action with the session's ``_xsrf`` token. When the queue stays
    empty through 11 consecutive 10-second checks the worker assumes
    the producer thread is done and exits.
    """
    while True:
        url=que.get()
        print(url)
        answer_id=do_work(url)
        print(answer_id[1])
        # Form fields expected by Zhihu's AnswerVoteBarV2 endpoint;
        # answer_id[1] is the numeric id captured by do_work().
        behaviors={
	'method':'vote_up',
	'params':'{"answer_id":"%s"}' %answer_id[1],
	'_xsrf':_xsrf
        }
        data=urllib.parse.urlencode(behaviors).encode("utf8")
        op=opener.open('https://www.zhihu.com/node/AnswerVoteBarV2',data)
        # Exit protocol: poll the queue while it is empty; if it is
        # still empty after more than 10 sleeps, break out of the poll
        # loop AND (via the mirrored i>10 check) the outer loop.
        i=0
        while que.empty():
            time.sleep(10)
            i=i+1
            if i>10:
                break
        if i>10:
            break
def get_pages_of_answer():
    """Producer loop: crawl pages 2-11 of the profile's answer listing.

    Each answer path found on a page is prefixed with the site host and
    pushed onto the shared queue for the voting worker thread.
    """
    for page in range(2, 12):  # adjust the upper bound to cover more pages
        page_url = persion_url + '?page=%s' % str(page)
        response = opener.open(page_url)
        body = ungzip(response.read())
        for path in search_answer_url(body.decode('utf8')):
            full_url = 'https://www.zhihu.com' + path
            que.put(full_url)
            print(full_url.encode('utf8'))

# Browser-like request headers attached to every request made through
# the opener. gzip is accepted in Accept-Encoding, so responses are
# unpacked with ungzip() after each read.
header={
	'connection':'Keep-Alive',
	'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
	'Upgrade-Insecure-Requests':'1',
	'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.3',
	#'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
	'Accept-Encoding':'gzip, deflate, sdch',
	'Accept-Language':'zh-CN,zh;q=0.8'
}

# --- Log in to Zhihu ----------------------------------------------------
# Fetch the home page once to harvest the _xsrf token required by the
# login form, then POST the credentials; the cookie jar inside `opener`
# keeps the authenticated session for all later requests.
url="https://www.zhihu.com"
opener=getOpener(header)
op=opener.open(url)
data=op.read()
data=ungzip(data)
_xsrf=get_xsrf(data.decode('utf8'))
print(_xsrf.encode('utf8'))
url=url+'/login/email'
# Placeholder credentials: '用户名' means "user name", '密码' means
# "password" — fill in real values before running.
id='用户名'
passwd='密码'
postDict={
	'_xsrf':_xsrf,
	'email':id,
	'password':passwd,
	'rememberme':'y'
}
data=urllib.parse.urlencode(postDict).encode('utf8')
op=opener.open(url,data)

persion_url='https://www.zhihu.com/people/baiyangtx/answers'  # link to the answers tab of the target user's profile
op=opener.open(persion_url)
data=op.read()
data=ungzip(data)
que=queue.Queue()
# Seed the queue with the answer links from page 1; pages 2-11 are
# enqueued concurrently by the producer thread below.
url=search_answer_url(data.decode('utf8'))
for u in url:
    u='https://www.zhihu.com'+ u
    que.put(u)
    print(u.encode('utf8'))

# t1 consumes the queue and casts votes; t2 produces more answer URLs
# from the remaining listing pages.
t1=threading.Thread(target=get_into_everypage)
t2=threading.Thread(target=get_pages_of_answer)
t1.start()
t2.start()
t2.join()
t1.join()




