import ast
import html
import re
import time
from datetime import datetime
from html.parser import HTMLParser

import execjs
import requests

from new_list.models import Blogs

# NOTE(review): HTMLParser.unescape() was deprecated in Python 3.4 and removed
# in 3.9 — any call site should use html.unescape() instead of this instance.
html_parser = HTMLParser()


class TouTiao():
    """Scraper for toutiao.com's PC feed API.

    Obtains session cookies, computes the anti-bot signature via a bundled
    JS file, fetches article listings per category, resolves each article's
    body and persists new entries to the ``Blogs`` model.
    """

    def __init__(self):
        # Landing page used only to obtain session cookies.
        self.base_url = 'https://www.toutiao.com/'
        self.base_header = {
            'user-agent': "5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko"
        }
        # Feed categories; index + 1 is what gets stored as Blogs.category_id.
        self.utm_source = ['__all__', 'news_hot', 'news_tech', 'news_game', 'news_car', 'news_finance', 'funny']
        self.json_url = 'https://www.toutiao.com/api/pc/feed/'
        # Template form for the feed endpoint; 'category', 'as', 'cp' and
        # '_signature' are filled in per request by get_from_data().
        self.form_data = {
            'category': 'news_hot',
            'utm_source': 'toutiao',
            'widen': '1',
            'max_behot_time': '0',
            'max_behot_time_tmp': '0',
            'tadrequire': 'true',
        }

    def get_cs_cp_sgin(self):
        """Execute the bundled signature.js and return the anti-bot dict.

        Returns:
            dict with keys 'as', 'cp' and '_signature'.
        """
        # ``with`` guarantees the handle is closed (the original leaked it),
        # and read() replaces the manual readline loop.
        with open(r"/Users/zeng/PycharmProjects/django_web_new/new_list/signature.js",
                  'r', encoding='UTF-8') as f:
            script = f.read()
        ctx = execjs.compile(script)
        # The JS call returns a dict-literal string; ast.literal_eval parses
        # it without the arbitrary-code-execution risk of eval().
        return ast.literal_eval(ctx.call('get_as_cp_signature'))

    def get_from_data(self, t1):
        """Build the feed request form: set category by index ``t1`` and
        refresh the 'as'/'cp'/'_signature' anti-bot fields.

        Returns the (shared, mutated in place) ``self.form_data`` dict.
        """
        as_cp_signature = self.get_cs_cp_sgin()
        self.form_data['category'] = self.utm_source[t1]
        self.form_data['as'] = as_cp_signature['as']
        self.form_data['cp'] = as_cp_signature['cp']
        self.form_data['_signature'] = as_cp_signature['_signature']
        return self.form_data

    def get_cookie(self):
        """First request: hit the landing page and return its cookies as a dict."""
        response = requests.get(self.base_url, headers=self.base_header)
        return requests.utils.dict_from_cookiejar(response.cookies)

    def get_content(self, source_url, cookie):
        """Fetch an article page and extract its HTML body.

        Args:
            source_url: site-relative article path from the feed item.
            cookie: cookie dict from get_cookie().

        Returns:
            The article body string, or None when neither extraction
            pattern matches.
        """
        url = 'https://www.toutiao.com' + source_url
        response = requests.get(url=url, headers=self.base_header, cookies=cookie, timeout=5)
        page = response.content.decode()
        # Primary pattern: body embedded in the page's inline JS state.
        match = re.search(r"content: \'(.*)\',\n", page)
        if match:
            # html.unescape replaces HTMLParser.unescape (removed in Py 3.9).
            # BUG FIX: the original called .group(1) BEFORE checking for a
            # miss, so a non-matching page raised AttributeError and the
            # fallback below was unreachable.
            return html.unescape(match.group(1))
        # Fallback: a plain <article> element in the rendered page.
        match = re.search(r"\<article\>([\s\S]+)\</article\>", page)
        if match:
            return match.group(1)
        return None

    def save_data(self, data, cookie, t1):
        """Persist feed items, skipping duplicates (matched by title) and
        items whose body cannot be fetched.

        Args:
            data: decoded JSON response of the feed endpoint.
            cookie: cookie dict forwarded to get_content().
            t1: value stored as Blogs.category_id.
        """
        for i in data['data']:
            try:
                title_ = i['title']
                if Blogs.objects.filter(title=title_).first():
                    print('------------------')
                    continue
                digest_ = i['abstract']
                category_id_ = t1
                # The feed exposes several alternative image fields; take the
                # first present, in order of preference (no exceptions needed).
                if 'middle_image' in i:
                    index_image_url_ = i['middle_image']
                elif 'image_url' in i:
                    index_image_url_ = 'https://' + i['image_url']
                else:
                    index_image_url_ = 'https://' + i['media_avatar_url']

                content_ = self.get_content(i['source_url'], cookie)
                if content_ is None:
                    print(0, '---------')
                    continue
                source_ = i['source']
                create_time_ = datetime.fromtimestamp(i['behot_time'])
            except Exception as exc:
                # Best-effort per item: a malformed entry or a failed fetch
                # skips this item instead of aborting the whole run, but is
                # logged instead of silently swallowed as before.
                print('skip item:', exc)
                continue
            print(1, '---------')
            print(create_time_)
            Blogs.objects.create(title=title_, digest=digest_, category_id=category_id_,
                                 index_image_url=index_image_url_,
                                 content=content_, source=source_, create_time=create_time_)
            print(2, '---------')
            # Throttle between articles to stay polite to the site.
            time.sleep(10)

    def main(self, refresh_id=None):
        """Crawl every category, or only ``self.utm_source[refresh_id]``
        when ``refresh_id`` is given (truthy)."""
        cookies = self.get_cookie()
        if refresh_id:
            self.utm_source = [self.utm_source[refresh_id]]

        for t1 in range(len(self.utm_source)):
            from_data = self.get_from_data(t1)
            response = requests.get(self.json_url, headers=self.base_header, cookies=cookies, params=from_data)
            # BUG FIX: with refresh_id the list shrinks to one element, so the
            # original always stored category_id=1.  Keep the category's real
            # id (index + 1, matching the full-crawl mapping).
            # TODO confirm callers pass the list index, not a 1-based DB id.
            category_id = (refresh_id + 1) if refresh_id else (t1 + 1)
            self.save_data(response.json(), cookies, t1=category_id)


def create_new():
    """Run a full crawl over every configured category."""
    spider = TouTiao()
    spider.main()


def refresh_new(refresh_id=None):
    """Re-crawl a single category selected by ``refresh_id``."""
    TouTiao().main(refresh_id)


if __name__ == '__main__':
    # Manual entry point: run a full crawl when executed as a script.
    TouTiao().main()