# _*_ coding: utf-8 _*_
# @Author   : Wei Yue
# @Time     : 2024-09-23 17:33
# @Function : NE-时代  NE资讯爬虫
import pytz
import requests
from datetime import datetime
from lxml import etree
from automotiveNews.common.constant import HEADERS, CarTopics
from automotiveNews.common.model import Article, ArticleDetail
from automotiveNews.crawler.my_doucument import MyDocument
from common.utils import simplify_content


class NETimeCrawler():
    """Crawler for the NE-Time (ne-time.cn) automotive news site.

    Two entry points:
      * ``startCrawlInformation`` — the "NE资讯" news feed (paged JSON API).
      * ``startCrawlOtherSection`` — a numbered topic section
        (e.g. 2 = electric drive, 103 = semiconductor, 4 = policy, 19 = OEM).

    Both append ``Article`` objects (with ``article_detail`` populated) to the
    caller-supplied list.
    """

    # Article timestamps on the site are China local time; using one shared
    # timezone keeps the recency filter independent of the host machine's zone.
    _TZ = pytz.timezone('Asia/Shanghai')

    # A stalled connection must not hang the crawler forever.
    _TIMEOUT = 30

    def __init__(self, url, days, topic: CarTopics = None):
        """
        :param url: base API url, e.g. ``'https://ne-time.cn/web/article'``
        :param days: keep only articles published within the last ``days`` days
        :param topic: optional default topic (kept for compatibility; the crawl
            methods take an explicit ``topic`` argument)
        """
        self.url = url
        self.days = days
        self.topic = topic

    def startCrawlInformation(self, articles: list, topic: CarTopics):
        """Crawl pages 1-5 of the "NE资讯" feed and append recent articles.

        :param articles: list to append to (existing entries are untouched)
        :param topic: topic tag stored on each new ``Article``
        """
        print(f'NE时代{topic.value}板块爬虫启动...')
        start = len(articles)
        current_date = datetime.now(tz=self._TZ)
        for page in range(1, 6):
            data = {
                'page': page,
                'type': '【NE资讯',
                'doc_id': 0
            }
            # Context manager releases the connection even if json() raises.
            with requests.post(self.url + '/link1', headers=HEADERS,
                               data=data, timeout=self._TIMEOUT) as resp:
                content_list = resp.json()['list']
            for content in content_list:
                # 'create_time' is an ISO-8601 string with an explicit offset.
                date_obj = datetime.strptime(content['create_time'],
                                             "%Y-%m-%dT%H:%M:%S.%f%z")
                if (current_date - date_obj).days > self.days:
                    continue  # older than the configured window
                href = 'https://ne-time.cn/web/article/' + str(content['id'])
                articles.append(Article('NE时代', content['title'], href,
                                        date_obj.strftime("%Y-%m-%d"), topic))
        self._fill_details(articles, start, fallback=False)
        print(f'NE时代{topic.value}板块爬虫完毕！')

    def startCrawlOtherSection(self, articles: list, type: int, topic: CarTopics):
        """Crawl the first page (20 items) of a numbered section.

        :param articles: list to append to (existing entries are untouched)
        :param type: site section id (shadows the builtin; name kept for
            backward compatibility with existing keyword callers)
        :param topic: topic tag stored on each new ``Article``
        """
        print(f'NE时代{topic.value}板块爬虫启动...')
        start = len(articles)
        # Timezone-aware "now", matching startCrawlInformation, so the
        # day-window check and the formatted date do not depend on the host
        # machine's local timezone.
        current_date = datetime.now(tz=self._TZ)
        data = {
            'page': 1,
            'limit': 20,
            'type': type
        }
        with requests.post(self.url + '/handleArticleList', headers=HEADERS,
                           data=data, timeout=self._TIMEOUT) as resp:
            content_list = resp.json()['obj']['rows']
        for content in content_list:
            # 'create_time' is a millisecond epoch timestamp on this endpoint.
            date_obj = datetime.fromtimestamp(content['create_time'] / 1000.0,
                                              tz=self._TZ)
            if (current_date - date_obj).days > self.days:
                continue  # older than the configured window
            href = 'https://ne-time.cn/web/article/' + str(content['id'])
            articles.append(Article('NE时代', content['title'], href,
                                    date_obj.strftime("%Y-%m-%d"), topic))
        self._fill_details(articles, start, fallback=True)
        print(f'NE时代{topic.value}板块爬虫完毕！')

    def _fill_details(self, articles: list, start: int, fallback: bool):
        """Fetch each page for ``articles[start:]`` and attach an ArticleDetail.

        :param start: index of the first article added by the calling method
        :param fallback: True for section pages — take ``//span`` text and fall
            back to direct ``/p`` text when fewer than two spans are found;
            False for NE资讯 pages — collect span and p text together.
        """
        for article in articles[start:]:
            with requests.get(article.url, headers=HEADERS,
                              timeout=self._TIMEOUT) as resp:
                resp.encoding = resp.apparent_encoding
                tree = etree.HTML(resp.text)
            if fallback:
                content_data = tree.xpath('//article[@id="content"]//span/text()')
                if len(content_data) < 2:
                    content_data = tree.xpath('//article[@id="content"]/p/text()')
            else:
                content_data = tree.xpath(
                    '//article[@id="content"]//span/text() | '
                    '//article[@id="content"]//p/text()')
            # simplify_content normalises whitespace (full-width / non-breaking
            # spaces) before the paragraphs are joined into one string.
            content_data = simplify_content(content_data)
            combined_text = '\n'.join("    " + item for item in content_data)
            img_urls = tree.xpath('//article[@id="content"]/p/img/@src')
            article.article_detail = ArticleDetail(combined_text, img_urls)

if __name__ == '__main__':
    # Crawl the last 3 days of NE-Time news across all tracked sections.
    crawler = NETimeCrawler('https://ne-time.cn/web/article', 3)
    articles = []
    crawler.startCrawlInformation(articles, CarTopics.MARKET_INFO)
    # Section ids: 2 = electric drive, 103 = semiconductor,
    # 4 = policy & regulation, 19 = vehicle OEM.
    sections = [
        (2, CarTopics.ELECTRIC_DRIVE),
        (103, CarTopics.FRONTIER_TECH),
        (4, CarTopics.POLICY_ANALYSIS),
        (19, CarTopics.OEM),
    ]
    for section_id, section_topic in sections:
        crawler.startCrawlOtherSection(articles, section_id, section_topic)
    for article in articles:
        print(article)
    MyDocument(articles).generate_docs()
