# coding=utf-8
import time
import urllib.request
from bs4 import BeautifulSoup
import math
import json
import re
import math

import sys

sys.path.append("..")

from script.deqQueue import DeqQueue

#后加的
import json
import time
import base64
import rsa
import binascii
import requests
import re
import random
from datetime import datetime

try:
    from PIL import Image
except:
    pass
try:
    from urllib.parse import quote_plus
except:
    from urllib import quote_plus

# Request headers: present a desktop-browser User-Agent so weibo.com serves
# the normal login flow instead of a bot/mobile page.
agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0'
headers = {
    'User-Agent': agent
}

# One shared session so cookies obtained during login persist across requests.
session = requests.session()


# Warm up the session: hit the login page once so the server hands out its
# initial cookies. Prefer a short timeout; fall back to an unbounded request
# if the timed one fails (best-effort).
# BUG FIX: both try/excepts used bare `except:`, which also swallowed
# KeyboardInterrupt/SystemExit — narrowed to the specific exceptions.
index_url = "http://weibo.com/login.php"
try:
    session.get(index_url, headers=headers, timeout=2)
except requests.exceptions.RequestException:
    session.get(index_url, headers=headers)
# Python 2 compatibility: make `input` behave like py2's raw_input.
try:
    input = raw_input
except NameError:
    pass

def get_su(username):
    """Encode the account name the way weibo's login form expects (`su` field).

    The name is URL-quoted first, then Base64-encoded; the result is returned
    as a str.
    """
    quoted = quote_plus(username)
    return base64.b64encode(quoted.encode("utf-8")).decode("utf-8")

# Pre-login: obtain servertime, nonce, pubkey, rsakv
def get_server_data(su):
    """Fetch the pre-login parameters needed to encrypt the password.

    The endpoint answers with a JSONP payload of the form
    ``sinaSSOController.preloginCallBack({...})``.

    BUG FIX (security): the original `eval()`'d the remote response, which is
    arbitrary code execution if the server (or a MITM — this is plain HTTP) is
    hostile. We now extract the ``{...}`` object and parse it with json.loads.

    Returns the decoded dict (servertime, nonce, pubkey, rsakv, showpin, ...).
    Raises ValueError if the response does not look like the expected JSONP.
    """
    pre_url = (
        "http://login.sina.com.cn/sso/prelogin.php?entry=weibo"
        "&callback=sinaSSOController.preloginCallBack&su=" + su +
        "&rsakt=mod&checkpin=1&client=ssologin.js(v1.4.18)&_=" +
        str(int(time.time() * 1000))  # cache-buster, as the login JS does
    )
    pre_data_res = session.get(pre_url, headers=headers)
    body = pre_data_res.content.decode("utf-8")
    # Strip the JSONP wrapper: keep what sits between the outer parentheses.
    match = re.search(r'\((\{.*\})\)', body, re.S)
    if match is None:
        raise ValueError("unexpected prelogin response: %r" % body[:200])
    return json.loads(match.group(1))

def get_password(password, servertime, nonce, pubkey):
    """RSA-encrypt the password the way weibo's login JS does.

    The plaintext is ``"<servertime>\\t<nonce>\\n<password>"``; it is encrypted
    with the server-supplied public key (hex modulus, fixed exponent 65537)
    and returned hex-encoded as bytes — the value of the ``sp`` form field.
    """
    modulus = int(pubkey, 16)
    public_key = rsa.PublicKey(modulus, 65537)
    plaintext = "%s\t%s\n%s" % (servertime, nonce, password)
    ciphertext = rsa.encrypt(plaintext.encode("utf-8"), public_key)
    return binascii.b2a_hex(ciphertext)

def get_cha(pcid):
    """Download the login captcha for `pcid` to ./cha.jpg and try to show it.

    If the image cannot be displayed (PIL missing — its import is optional at
    the top of the file — or no display available), ask the user to open the
    saved file themselves.
    """
    cha_url = (
        "http://login.sina.com.cn/cgi/pin.php?r=" +
        str(int(random.random() * 100000000)) + "&s=0&p=" + pcid
    )
    cha_page = session.get(cha_url, headers=headers)
    # BUG FIX: the original also called f.close() inside the `with` block —
    # redundant, the context manager closes the file.
    with open("cha.jpg", 'wb') as f:
        f.write(cha_page.content)
    try:
        im = Image.open("cha.jpg")
        im.show()
        im.close()
    except Exception:
        # NameError when PIL is absent, or any display failure.
        print(u"请到当前目录下，找到验证码后输入")


# --- compiled once (hoisted out of the scrape loop): patterns for parsing ---
# --- the JSON-escaped HTML weibo embeds in its pages (hence the \\" forms) --

# One timeline entry's permalink anchor (<a ... mod=weibotime ...></a>).
_ENTRY_ANCHOR_RE = re.compile(r'<a name=.*?target=\\"_blank\\".*?href.*?profile&wvr=6&mod=weibotime.*?title.*?date.*?node-type.*?<\\/a>')
# "YYYY-M-D H:M" inside the anchor's title attribute.
_TIME_DETAIL_RE = re.compile(r'\d{4}-\d{1,2}-\d{1,2} \d{1,2}:\d{1,2}')
# The href of the permalink inside the anchor.
_LINK_DETAIL_RE = re.compile(r'href=\\".*\?from=page.*mod=weibotime\\"')
# One post's text <div>.
_TEXT_RE = re.compile(r'<div class=\\"WB_text W_f14\\" node-type=\\"feed_list_content\\".*?<\\/div>')
# Runs of CJK text (with optional surrounding word chars/digits).
_HANZI_RE = re.compile(r'\w*\d*[\u4e00-\u9fa5]+\d*[\u4e00-\u9fa5]+')


def _load_cookies(path):
    """Best-effort: load cookies saved by a previous run into the session.

    Returns True when cookies were loaded, False otherwise.
    """
    try:
        with open(path, "r") as fp:
            raw = fp.read()
    except (IOError, OSError):
        return False
    if not raw:
        return False
    try:
        session.cookies.update(json.loads(raw))
    except ValueError:
        return False
    return True


def _save_cookies(path):
    """Persist the session's cookies so a later run can reuse the login."""
    with open(path, "w") as fp:
        fp.write(json.dumps(session.cookies.get_dict()))


def _extract_post_times(page_content):
    """Return one unix timestamp (int) per post found on the page."""
    stamps = []
    for anchor in _ENTRY_ANCHOR_RE.findall(page_content):
        detail = _TIME_DETAIL_RE.findall(anchor)[0]
        parsed = datetime.strptime(detail, "%Y-%m-%d %H:%M")
        # Naive local-time timestamp, as the original code produced.
        stamps.append(int(parsed.timestamp()))
    return stamps


def _extract_post_links(page_content):
    """Return one absolute permalink URL per post found on the page."""
    links = []
    for anchor in _ENTRY_ANCHOR_RE.findall(page_content):
        detail = _LINK_DETAIL_RE.search(anchor).group(0)
        # Strip the JSON-escaped href=\" prefix and remaining quote/backslash
        # noise, leaving just the path.
        detail = detail.replace("href=\\\"\\", "").replace("\"", "")
        detail = detail.replace("\\", "")
        links.append("https://weibo.com" + detail)
    return links


def _extract_post_texts(page_content):
    """Return one string of CJK text per post, words joined with '，'."""
    texts = []
    for raw in _TEXT_RE.findall(page_content):
        raw = raw.replace("<\/div>", "")
        raw = raw.replace(" ", "")
        raw = raw.replace(r'<divclass=\"WB_textW_f14\"node-type=\"feed_list_content\">\n', "")
        raw = raw.replace(r'<divclass=\"WB_textW_f14\"node-type=\"feed_list_content\"nick-name=\"\">\n', "")
        words = _HANZI_RE.findall(raw)
        # The stray 'n' characters are leftovers of escaped "\n" sequences.
        texts.append(''.join(w.replace("n", "") + '，' for w in words))
    return texts


def login(username, password):
    """Log in to weibo.com, then loop forever scraping queued profile pages.

    Flow:
      1. SSO login: `su` is the encoded username, `sp` the RSA-encrypted
         password; a captcha is handled interactively when the server demands
         one (`showpin`). The redirect chain is followed to establish cookies.
      2. Worker loop: pop profile URLs from the "link_weibo" queue, scrape
         each post's time/link/text with regexes, and push one JSON record
         per post (from the last 24h only) to the "twitter_result" queue.
    """
    # ---- 1. SSO login -----------------------------------------------------
    su = get_su(username)  # encoded username
    sever_data = get_server_data(su)
    servertime = sever_data["servertime"]
    nonce = sever_data['nonce']
    rsakv = sever_data["rsakv"]
    pubkey = sever_data["pubkey"]
    showpin = sever_data["showpin"]
    password_secret = get_password(password, servertime, nonce, pubkey)

    # Form fields replicated from weibo's ssologin.js (v1.4.18).
    postdata = {
        'entry': 'weibo',
        'gateway': '1',
        'from': '',
        'savestate': '7',
        'useticket': '1',
        'pagerefer': "http://login.sina.com.cn/sso/logout.php?entry=miniblog&r=http%3A%2F%2Fweibo.com%2Flogout.php%3Fbackurl",
        'vsnf': '1',
        'su': su,
        'service': 'miniblog',
        'servertime': servertime,
        'nonce': nonce,
        'pwencode': 'rsa2',
        'rsakv': rsakv,
        'sp': password_secret,
        'sr': '1366*768',
        'encoding': 'UTF-8',
        'prelt': '115',
        'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
        'returntype': 'META'
    }
    login_url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)'

    # BUG FIX: the original opened cookies.txt with mode "wb+", which
    # truncates the file immediately — saved cookies could never be read
    # back — and the later `bytes == str` comparison meant they were never
    # written either. Text-mode helpers make both directions actually work.
    had_cookies = _load_cookies("cookies.txt")

    if showpin == 0:
        login_page = session.post(login_url, data=postdata, headers=headers)
    else:
        # Server demands a captcha: fetch/show it and ask the user to type it.
        pcid = sever_data["pcid"]
        get_cha(pcid)
        postdata['door'] = input(u"请输入验证码")
        login_page = session.post(login_url, data=postdata, headers=headers)

    # The response is a GBK-encoded META page whose JS redirects via
    # location.replace("..."); follow that URL manually.
    login_loop = (login_page.content.decode("GBK"))
    pa = r'location\.replace\([\'"](.*?)[\'"]\)'
    loop_url = re.findall(pa, login_loop)[0]
    # TODO(review): add an explicit "did login succeed?" check here.
    login_index = session.get(loop_url, headers=headers)
    uuid = login_index.text
    uuid_pa = r'"uniqueid":"(.*?)"'
    uuid_res = re.findall(uuid_pa, uuid, re.S)[0]
    web_weibo_url = "http://weibo.com/%s/profile?topnav=1&wvr=6&is_all=1" % uuid_res
    weibo_page = session.get(web_weibo_url, headers=headers)
    weibo_pa = r'<title>(.*?)</title>'
    userID = re.findall(weibo_pa, weibo_page.content.decode("utf-8", 'ignore'), re.S)[0]
    print(u"欢迎你 %s, 登录成功" % userID)

    # Save cookies for the next run (only when we did not already have them).
    if not had_cookies:
        _save_cookies("cookies.txt")

    # ---- 2. scrape worker loop --------------------------------------------
    # Template for one result record; 'state' is 0 on success, -1 on failure.
    recv_data_sample = {'sid': 3, 'gid': 3, 'url': 'https://weibo.com/huobicom?refer_flag=1001030101_',
                        'data': {
                            'content': "sdfs", 'title': "title",
                            'url': 'www.baidu.com',  # permalink of the post
                            'status': 0,
                            'issue_time': '',  # post publication time
                            'create_time': 123456,  # now
                            'update_time': 123456,  # now
                            'tag_str': 'tag',
                            'source': "weibo",  # set by the editors
                            'twitter_name': '',  # profile display name
                            'type': 3

                        }, 'state': 0}

    queue = DeqQueue()

    data_str = ""

    # Poll the queue forever (data_str is reset to "" after every job).
    while data_str == "":
        data_str = queue.get("link_weibo")  # pull the next job

        if data_str is None or data_str == '':
            data_str = ""
            print("data is empty")
            continue

        data = json.loads(data_str)
        # BUG FIX: the original initialised a variable named `url` but then
        # read `scrape_url`, which was unbound (NameError) whenever the job
        # had no 'url' key. Use .get() and skip jobs without a usable URL.
        scrape_url = data.get('url', '')

        print(scrape_url)

        # Search-result pages are handled elsewhere; skip them (and jobs
        # that carried no URL at all).
        if not scrape_url or scrape_url.startswith('http://s.weibo'):
            data_str = ""
            continue

        scrape_html = session.get(scrape_url, headers=headers)
        scrape_page_content = scrape_html.content.decode("utf-8")

        # Profile display name comes from <title>, minus the "_微博" suffix.
        weibo_name = re.search(r'<title>.*</title>', scrape_page_content).group(0)
        weibo_name = weibo_name.replace("<title>", "")
        weibo_name = weibo_name.replace("</title>", "")
        weibo_name = weibo_name.replace("_微博", "")

        # Parallel arrays: entry i of each describes the same post.
        weibo_time_array = _extract_post_times(scrape_page_content)
        weibo_link_array = _extract_post_links(scrape_page_content)
        weibo_text_array = _extract_post_texts(scrape_page_content)

        for i in range(len(weibo_text_array)):
            recv_data_sample['data']['content'] = weibo_text_array[i]
            recv_data_sample['url'] = weibo_link_array[i]
            recv_data_sample['data']['url'] = weibo_link_array[i]
            recv_data_sample['data']['issue_time'] = weibo_time_array[i]
            recv_data_sample['data']['create_time'] = int(time.time())
            recv_data_sample['data']['update_time'] = int(time.time())
            recv_data_sample['data']['twitter_name'] = weibo_name

            # Only forward posts published within the last 24 hours.
            time_now = int(time.time())
            if time_now - weibo_time_array[i] > 86400:
                print("time is not match,url={}".format(recv_data_sample['data']['url']))
                data_str = ""
                continue

            # BUG FIX: the original used `is ''` (identity, not equality) —
            # use == for string comparison. Also normalised a tab-indented
            # line to spaces.
            if recv_data_sample['data']['content'] == '':
                recv_data_sample['state'] = -1
            else:
                recv_data_sample['data']['status'] = 1

            print(recv_data_sample)  # log the scraped record

            queue.put("twitter_result", json.dumps(recv_data_sample))  # push result

            data_str = ""

if __name__ == "__main__":
    # NOTE(review): real account credentials are hard-coded in the source —
    # move them to environment variables or a config file before sharing or
    # deploying this script, and rotate this password.
    username = "13521093039"
    password = "bonjour123,./"
    login(username, password)