import requests
import re
from lxml import etree
from handle_db import db_data
from function import *
import time


def handle_decode(input_data, share_id):
    """Decode the obfuscated digits in a Douyin web-share page and print the
    user's profile statistics.

    Douyin's share page renders every digit with a custom icon font, so the
    raw HTML contains character entities instead of numerals.  Each digit has
    three possible glyph codes; we map them all back to the plain digit before
    parsing the page with lxml.

    :param input_data: raw HTML text of https://www.douyin.com/share/user/<id>
    :param share_id:   the user's share id (currently only used by the
                       commented-out database update)
    """
    # Glyph-entity -> digit table for the web share page's obfuscation font.
    digit_glyphs = {
        ' &#xe603; ': '0', ' &#xe60d; ': '0', ' &#xe616; ': '0',
        ' &#xe602; ': '1', ' &#xe60e; ': '1', ' &#xe618; ': '1',
        ' &#xe605; ': '2', ' &#xe610; ': '2', ' &#xe617; ': '2',
        ' &#xe604; ': '3', ' &#xe611; ': '3', ' &#xe61a; ': '3',
        ' &#xe606; ': '4', ' &#xe60c; ': '4', ' &#xe619; ': '4',
        ' &#xe607; ': '5', ' &#xe60f; ': '5', ' &#xe61b; ': '5',
        ' &#xe608; ': '6', ' &#xe612; ': '6', ' &#xe61f; ': '6',
        ' &#xe60a; ': '7', ' &#xe613; ': '7', ' &#xe61c; ': '7',
        ' &#xe60b; ': '8', ' &#xe614; ': '8', ' &#xe61d; ': '8',
        ' &#xe609; ': '9', ' &#xe615; ': '9', ' &#xe61e; ': '9',
    }
    # The glyph codes are literal strings, so plain str.replace suffices
    # (the original used re.sub, which risks regex metacharacter surprises).
    for glyph, digit in digit_glyphs.items():
        input_data = input_data.replace(glyph, digit)

    def _to_count(num_str, unit):
        """Convert a joined digit string plus unit marker to an int count.

        'w' means 万 (10,000); the joined digits then carry one implied
        decimal place (e.g. '1234' + 'w' -> 123.4w -> 1,234,000) —
        presumably matching the page's one-decimal display; verify against
        live markup.
        """
        if unit.strip() == 'w':
            return int(int(num_str) / 10 * 10000)
        return int(num_str)

    # Build the HTML tree from the de-obfuscated markup.
    share_web_html = etree.HTML(input_data)
    user_info = {}
    # Additional profile fields available on the page (left disabled, as in
    # the original):
    # user_info['nickname'] = share_web_html.xpath("//p[@class='nickname']/text()")[0]
    # douyin_id1 = share_web_html.xpath("//p[@class='shortid']/text()")[0].replace(' ','')
    # douyin_id2 = ''.join(share_web_html.xpath("//p[@class='shortid']/i/text()"))
    # user_info['douyin_id'] = re.sub(r'抖音ID：', '', douyin_id1 + douyin_id2)
    # user_info['describe'] = share_web_html.xpath("//p[@class='signature']/text()")[0]

    user_info['location'] = share_web_html.xpath("//p[@class='extra-info']/span[1]/text()")[0]
    user_info['xingzuo'] = share_web_html.xpath("//p[@class='extra-info']/span[2]/text()")[0]
    user_info['following_count'] = ''.join(share_web_html.xpath("//p[@class='follow-info']/span[1]//i/text()"))

    # Follower count: digits from span[2], unit marker from the same span.
    fans = ''.join(share_web_html.xpath("//p[@class='follow-info']/span[2]//i/text()"))
    fans_unit = share_web_html.xpath("//p[@class='follow-info']/span[2]/span[@class='num']/text()")[-1]
    user_info['follower_count'] = _to_count(fans, fans_unit)

    # Total likes: digits from span[3].  BUGFIX: the unit marker must also
    # come from span[3] — the original read it from span[2] (the fans span),
    # so a 'w'-scaled like count paired with an unscaled fan count (or vice
    # versa) was parsed wrong.
    like = ''.join(share_web_html.xpath("//p[@class='follow-info']/span[3]//i/text()"))
    like_unit = share_web_html.xpath("//p[@class='follow-info']/span[3]/span[@class='num']/text()")[-1].replace(' ', '')
    user_info['total_favorited'] = _to_count(like, like_unit)

    user_info['aweme_count'] = ''.join(share_web_html.xpath(
        "//div[@class='video-tab']/div[@class='tab-wrap']/div[@class='user-tab active tab get-list']/span/i/text()"))
    user_info['favoriting_count'] = ''.join(share_web_html.xpath(
        "//div[@class='video-tab']/div[@class='tab-wrap']/div[@class='like-tab tab get-list']/span/i/text()"))

    # Persist to the comment/reply user-info table (left disabled, as in the
    # original):
    # db_data.handle_data_update('douyin_comment_user', 'uid', share_id, '\'', user_info)

    print(user_info)


def handle_douyin_web_share(share_id):
    """Fetch a user's Douyin web-share page and hand the HTML to
    ``handle_decode`` for parsing.

    :param share_id: the numeric share id of the user profile to fetch
    """
    share_web_url = "https://www.douyin.com/share/user/{0}".format(share_id)
    # A desktop browser UA — the share endpoint serves the obfuscated-font
    # HTML page to web clients.
    share_web_header = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
    }
    # BUGFIX: requests has no default timeout, so a stalled connection would
    # hang this script forever; bound it explicitly.
    share_web_response = requests.get(url=share_web_url, headers=share_web_header, timeout=10)
    handle_decode(share_web_response.text, share_id)


# Driver: process users from the douyin_video_author table.
# get_uid_info(field, table): field = column to fetch, table = table name.
video_author_all = db_data.get_uid_info('uid', 'douyin_comment_user')
# Loop over every stored uid (disabled; a single hard-coded id is used below):
# for user in video_author_all:
#     share_id = user[0]
handle_douyin_web_share('100014091929')
# Pause 0.5 s between requests to avoid overloading the database.
time.sleep(0.5)
