import csv
import requests
import re

# Douyin obfuscates the digits on profile pages with a custom icon font:
# each digit is rendered as an HTML character-reference entity instead of
# plain text.  The site rotates between three glyph sets, so every digit
# maps from three different entity codes.  `code_replace` uses this table
# to turn the entities back into real digits.
codes = {
    "&#xe603;": "0", "&#xe60d;": "0", "&#xe616;": "0",
    "&#xe602;": "1", "&#xe60e;": "1", "&#xe618;": "1",
    "&#xe605;": "2", "&#xe610;": "2", "&#xe617;": "2",
    "&#xe604;": "3", "&#xe611;": "3", "&#xe61a;": "3",
    "&#xe606;": "4", "&#xe60c;": "4", "&#xe619;": "4",
    "&#xe607;": "5", "&#xe60f;": "5", "&#xe61b;": "5",
    "&#xe608;": "6", "&#xe612;": "6", "&#xe61f;": "6",
    "&#xe60a;": "7", "&#xe613;": "7", "&#xe61c;": "7",
    "&#xe60b;": "8", "&#xe614;": "8", "&#xe61d;": "8",
    "&#xe609;": "9", "&#xe615;": "9", "&#xe61e;": "9"
}
# Browser-like request headers sent with every profile fetch so the
# scrape is less likely to be rejected as an obvious bot.
HEADERS = {
    "Host": "www.iesdouyin.com",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
    "Connection": "keep-alive"
}


def page_sourse(url):
    """Fetch a Douyin profile page and extract its raw profile fields.

    Parameters:
        url: profile page URL on www.iesdouyin.com.

    Returns:
        Tuple ``(nickname, short_id, follower_count, liked_count,
        video_count)``.  The four numeric fields still contain the
        ``&#xe6..;`` icon-font entities and must be decoded with
        ``code_replace()``.

    Raises:
        requests.HTTPError: on a non-2xx response.
        AttributeError: if the page layout changed and one of the
            field regexes no longer matches (``re.search`` returns
            ``None``).
    """
    resp = requests.get(url, headers=HEADERS)
    # Fail loudly on an error page instead of raising a confusing
    # AttributeError while parsing it.
    resp.raise_for_status()
    html = resp.text

    # NOTE(review): the original code round-tripped the nickname through
    # encode("gbk").decode("gbk"), which is a no-op when it succeeds and
    # raises UnicodeEncodeError on non-GBK characters (e.g. emoji in
    # nicknames); that step has been removed.
    dy_name = re.search('(?<=<p class="nickname">)(.*?)(?=</p>)', html).group()

    short_id = re.search('(?<=<p class="shortid">)(.*?)(?=</p>)', html).group()
    parse_id = (short_id
                .replace("抖音ID：     ", "")
                .replace('<i class="icon iconfont "> ', "")
                .replace(" </i>", "")
                .replace(" ", ""))

    follower_block = re.search(
        '(?<=<span class="follower block">)(.*?)(?=</span>)', html).group()
    follower_num = _strip_num_markup(follower_block, "follow-num")

    liked_num_block = re.search(
        '(?<=<span class="liked-num block">)(.*?)(?=</span>)', html).group()
    liked_num = _strip_num_markup(liked_num_block, "follow-num")

    video_tab = re.search(
        '(?<=<div class="user-tab active tab get-list" data-type="post">)(.*?)(?=</span>)',
        html).group()
    # "作品" is the literal "works/posts" tab label preceding the count.
    tab_num = _strip_num_markup(video_tab.replace("作品", ""), "tab-num")

    return dy_name, parse_id, follower_num, liked_num, tab_num


def _strip_num_markup(block, icon_class):
    """Strip the <span>/<i> wrapper markup and all whitespace from a
    numeric block, leaving only icon-font entities and digits.

    Factors out the replace chain that was duplicated three times
    (follower / liked / video counts); only the <i> element's class
    name differs between them.
    """
    return (block
            .replace('<span class="num">    ', "")
            .replace('<i class="icon iconfont %s">' % icon_class, "")
            .replace(" </i> ", "")
            .replace(" </i>", "")
            .replace(" ", ""))


def get_shortid(url):
    """Scrape one Douyin profile URL and return its decoded fields.

    Returns a 5-tuple: (nickname, Douyin ID, follower count,
    like count, video count), with the icon-font digit obfuscation
    resolved via ``code_replace``.
    """
    name, raw_id, raw_followers, raw_likes, raw_videos = page_sourse(url)
    decoded = [code_replace(field)
               for field in (raw_id, raw_followers, raw_likes, raw_videos)]
    return (name,) + tuple(decoded)


def code_replace(parse_id, mapping=None):
    """Decode obfuscated icon-font entities in *parse_id* into digits.

    Parameters:
        parse_id: string possibly containing ``&#xe6..;`` entities.
        mapping: optional entity->digit table; defaults to the
            module-level ``codes`` map.  (Added as a backward-compatible
            parameter so the translation table is no longer hard-coded.)

    Returns:
        The string with every known entity replaced by its digit.
    """
    if mapping is None:
        mapping = codes
    # str.replace is already a no-op when the entity is absent, so the
    # original `if key in parse_id` membership pre-check was redundant.
    for entity, digit in mapping.items():
        parse_id = parse_id.replace(entity, digit)
    return parse_id


def run():
    """Read profile URLs from douyin_url.csv (first column of each row)
    and append each profile's decoded fields to douyin_result.csv.

    Output columns: username, ID, follower count, like count,
    video count.
    """
    # newline='' on both files, as the csv module requires for correct
    # quoting/line-ending handling.
    with open('douyin_url.csv', newline='') as fr:
        fr_csv = csv.reader(fr)
        with open('douyin_result.csv', 'a+', newline='', encoding='utf-8-sig') as fw:
            fw_csv = csv.writer(fw)
            # Uncomment to write a header row on a fresh output file:
            # fw_csv.writerow(['username', 'ID', 'followers', 'likes', 'videos'])
            for row in fr_csv:
                # Guard against blank lines in the input file, which
                # previously raised IndexError on row[0].
                if not row or not row[0].strip():
                    continue
                result = get_shortid(row[0].strip())
                # Persist one result row per profile.
                fw_csv.writerow(result)


# Script entry point: only scrape when executed directly, not on import.
if __name__ == '__main__':
    run()
