import html
import json
import random
import re
import time
from datetime import datetime
from html.parser import HTMLParser

import chardet
import execjs
import requests

from new_list.models import Blogs


def get_js():
    """Compile signature.js and return the raw result of its
    ``get_as_cp_signature()`` call (a string later eval'd into a dict
    with 'as', 'cp' and '_signature' keys).

    Raises OSError if the JS file is missing; execjs errors propagate.
    """
    # NOTE(review): hard-coded absolute path — consider making this
    # configurable (settings / env var) instead.
    js_path = r"/Users/zeng/PycharmProjects/django_web_new/new_list/signature.js"
    # Open the JS file via a context manager so the handle is always
    # closed (the original leaked it), and read it in one call instead
    # of a manual readline loop.
    with open(js_path, 'r', encoding='UTF-8') as f:
        source = f.read()
    ctx = execjs.compile(source)
    return ctx.call('get_as_cp_signature')


# Evaluate the JS-generated signature string into a Python dict holding the
# 'as', 'cp' and '_signature' query params required by the feed API.
# NOTE(review): eval on generated text — if the string is valid JSON,
# json.loads would be safer; confirm the format before changing.
get_as_cp_signature = eval(get_js())
# proxies = {'http': 'http://119.123.177.36:9000', 'https': 'https://119.123.177.36:9000'}

# Parser instance kept for its unescape() method (used on article bodies).
# NOTE(review): HTMLParser.unescape was removed in Python 3.9 — verify the
# runtime version, or switch callers to html.unescape.
html_parser = HTMLParser()
# Headers for the JSON feed endpoint (sent as an XHR request).
header1 = {
    'x-requested-with': 'XMLHttpRequest',
    'user-agent': '5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko'
}
# Headers for plain page fetches (cookie bootstrap + article pages).
header2 = {
    'user-agent': '5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko'
}
# Placeholder cookie; tt_webid is refreshed from a live response below.
cookie = {'tt_webid': '6622557064899364356'}
# Feed category slugs; their 1-based index is stored as Blogs.category_id.
list_ = ['__all__', 'news_hot', 'news_tech', 'news_game', 'news_car', 'news_finance', 'funny']
# Query params for the feed request. 'category' is filled in per pass by
# create_new(), which mutates this shared dict before each req() call.
data = {
    # 'category': list_[t1],
    'utm_source': 'toutiao',
    'widen': '1',
    'max_behot_time': '0',
    'max_behot_time_tmp': '0',
    'tadrequire': 'true',
    'as': get_as_cp_signature['as'],
    'cp': get_as_cp_signature['cp'],
    '_signature': get_as_cp_signature['_signature']
}

# Toutiao PC feed endpoint queried by req() below.
url = 'https://www.toutiao.com/api/pc/feed/'
# Bootstrap request whose only purpose is to obtain a fresh tt_webid cookie.
# NOTE(review): the name `req` is rebound to a function further down; this
# Response object is not used after the next line.
req = requests.get(url='https://www.toutiao.com/', headers=header2)
cookie['tt_webid'] = req.cookies.get('tt_webid')


def req(t1, data=data, header1=header1, header2=header2, cookie=cookie):
    """Fetch one page of the Toutiao feed and store each new article as a Blog.

    :param t1: 1-based category id (index into ``list_`` + 1) stored on each
        created Blog row.
    :param data: feed query params. Defaults to the shared module-level dict,
        which ``create_new`` mutates to set the category — this sharing is
        intentional, so the default is kept.
    :param header1: headers for the JSON feed endpoint (XHR).
    :param header2: headers for article-page fetches.
    :param cookie: cookies (tt_webid) sent with every request.

    Network or parsing failures for an individual article are logged by the
    existing print markers and skipped; the loop always continues.
    """
    response = requests.get(url=url, headers=header1, params=data, cookies=cookie)
    # Do not shadow the `data` parameter with the response payload
    # (the original reused the name three times in this function).
    feed = response.json()
    print('0')
    for i in feed['data']:
        try:
            title_ = i['title']
            # Title doubles as the uniqueness key — skip known articles.
            if Blogs.objects.filter(title=title_).first():
                print('------------------')
                continue
            digest_ = i['abstract']
            category_id_ = t1
            # Pick the best available image, falling back through the keys the
            # feed may provide (explicit checks instead of nested bare excepts).
            if 'middle_image' in i:
                index_image_url_ = i['middle_image']
            elif 'image_url' in i:
                index_image_url_ = 'https://' + i['image_url']
            else:
                index_image_url_ = 'https://' + i['media_avatar_url']
            url_1 = 'https://www.toutiao.com' + i['source_url']
            response = requests.get(url=url_1, headers=header2, cookies=cookie, timeout=5)
            page = response.content.decode()
            # The article body is embedded either as a JS `content:` string or
            # inside an <article> tag. BUG FIX: the original called .group(1)
            # before checking for a failed match, so the <article> fallback was
            # dead code (the AttributeError was swallowed by the bare except).
            match = re.search(r"content: \'(.*)\',\n", page)
            if match:
                # html.unescape replaces HTMLParser.unescape, which was
                # removed in Python 3.9.
                content_ = html.unescape(match.group(1))
            else:
                match = re.search(r"\<article\>([\s\S]+)\</article\>", page)
                if not match:
                    print(0, '---------')
                    continue
                content_ = match.group(1)
            source_ = i['source']
            create_time_ = datetime.fromtimestamp(i['behot_time'])
            time.sleep(10)  # throttle between article fetches
        except Exception:
            # Best-effort scraping: skip any article that fails to parse or
            # fetch. Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            continue
        print(1, '---------')
        Blogs.objects.create(title=title_, digest=digest_, category_id=category_id_,
                             index_image_url=index_image_url_,
                             content=content_, source=source_, create_time=create_time_)
        print(2, '-------------')


def create_new():
    """Run one crawl pass over every feed category, then pause for an hour."""
    for category_id, category in enumerate(list_, start=1):
        # The shared module-level `data` dict doubles as req()'s query params,
        # so setting the category here is visible inside the call.
        data['category'] = category
        req(t1=category_id)
        time.sleep(5)  # polite delay between categories
    time.sleep(3600)  # cool down before the caller starts another pass


if __name__ == '__main__':
    # Script entry point: run a single crawl pass over all categories.
    create_new()
    # Trailing sleeps/prints appear to be leftover debugging markers —
    # NOTE(review): confirm they are still needed.
    time.sleep(10)
    print(1)
    time.sleep(10)
    print(2)
