# _*_ coding: utf-8 _*_
# @Author   : Wei Yue
# @Time     : 2024-09-12 16:10
# @Function : China Economic Net (ce.cn) crawler — international news, business news, international economy sections
import requests
from datetime import datetime
from lxml import etree

from automotiveNews.common.constant import HEADERS, CarTopics
from automotiveNews.common.model import Article, ArticleDetail
from automotiveNews.crawler.my_doucument import MyDocument
from common.utils import simplify_content


class ChinaEconomicNetCrawler:
    """Crawler for ce.cn (China Economic Net) international sections.

    Scrapes the "international news" (国际要闻) and "international economy"
    (国际经济) list pages, keeps car-related articles ('车' in the title)
    published within ``days`` of now, then fetches each article page and
    attaches its body text as an :class:`ArticleDetail`.
    """

    # Host used when resolving relative hrefs found on the list pages.
    _BASE = 'http://intl.ce.cn'
    # Section paths that './'-relative hrefs are resolved against.
    _NEWS_SECTION = '/guoji/guojiyaowen'
    _ECONOMY_SECTION = '/guoji/jingji'
    # Per-request timeout (seconds) so a stalled server cannot hang the crawl.
    _TIMEOUT = 30

    def __init__(self, url, days, topic: CarTopics):
        """
        :param url: site root, e.g. ``http://intl.ce.cn``
        :param days: keep only articles published within this many days
        :param topic: topic tag attached to every crawled ``Article``
        """
        self.url = url
        self.days = days
        self.topic = topic

    def _convert_to_full_url(self, path: str, section: str) -> str:
        """Resolve a list-page href to an absolute article URL.

        '../../x' is relative to the site root, './x' to the given section
        listing page; anything else is assumed to be absolute already.
        """
        if path.startswith('../../'):
            # path[5:] keeps one leading '/': '../../a.shtml' -> '/a.shtml'.
            return self._BASE + path[5:]
        if path.startswith('./'):
            # path[1:] keeps the leading '/': './a.shtml' -> '/a.shtml'.
            return self._BASE + section + path[1:]
        return path

    def convert_to_full_url_international_news(self, path):
        """Resolve an href taken from the international-news listing."""
        return self._convert_to_full_url(path, self._NEWS_SECTION)

    def convert_to_full_url_international_economy(self, path):
        """Resolve an href taken from the international-economy listing."""
        return self._convert_to_full_url(path, self._ECONOMY_SECTION)

    @staticmethod
    def _resolve_year(date_obj: datetime, now: datetime) -> datetime:
        """Fix the year of a list-page date that omits it.

        The listings show only month/day; a parsed date lying in the future
        must belong to the previous year (e.g. a December article crawled
        in January), otherwise it would be wrongly filtered out.
        """
        if date_obj.date() > now.date():
            return date_obj.replace(year=date_obj.year - 1)
        return date_obj

    def _fill_article_details(self, articles: list, start: int):
        """Fetch every article appended at/after ``start`` and attach its body."""
        for article in articles[start:]:
            with requests.get(article.url, headers=HEADERS, timeout=self._TIMEOUT) as resp:
                resp.encoding = resp.apparent_encoding
                paragraphs = etree.HTML(resp.text).xpath('//*[@id="article_p"]//p/text()')
            # Merge the cleaned paragraphs into a single text body.
            article.article_detail = ArticleDetail('\n'.join(simplify_content(paragraphs)), [])

    def startCrawlInternationalNews(self, articles: list):
        """Crawl the international-news listing, appending matches to ``articles``."""
        print(f'中国经济网国际要闻{self.topic.value}板块爬虫启动...')
        now = datetime.now()
        with requests.get(self.url + '/guoji/guojiyaowen/', headers=HEADERS,
                          timeout=self._TIMEOUT) as resp:
            resp.encoding = resp.apparent_encoding
            items = etree.HTML(resp.text).xpath('//ul[@class="lbcon"]/li')
        start = len(articles)
        for item in items:
            titles = item.xpath('./a/text()')
            if not titles or '车' not in titles[0]:
                continue
            title = titles[0]
            texts = item.xpath('./text()')
            if len(texts) < 2:
                continue  # malformed row: no date cell
            date = texts[1].replace('\xa0', '').strip()
            date_obj = self._resolve_year(
                datetime.strptime(date, '%m/%d').replace(year=now.year), now)
            # Skip anything older than the configured window.
            if abs((date_obj.date() - now.date()).days) > self.days:
                continue
            print(f"爬取到中国经济网文章【{title}】")
            href = self.convert_to_full_url_international_news(item.xpath('./a/@href')[0])
            articles.append(Article('中国经济网', title, href,
                                    date_obj.strftime('%Y-%m-%d'), self.topic))
        self._fill_article_details(articles, start)
        print(f'中国经济网国际要闻{self.topic.value}板块爬虫完毕！')

    def startCrawlInternationalEconomy(self, articles: list):
        """Crawl the international-economy listing, appending matches to ``articles``."""
        print(f'中国经济网国际经济{self.topic.value}板块爬虫启动...')
        now = datetime.now()
        with requests.get(self.url + '/guoji/jingji/', headers=HEADERS,
                          timeout=self._TIMEOUT) as resp:
            resp.encoding = resp.apparent_encoding
            items = etree.HTML(resp.text).xpath('//ul[@class="con"]/li')
        start = len(articles)
        for item in items:
            titles = item.xpath('./span/a/text()')
            if not titles or '车' not in titles[0]:
                continue
            title = titles[0]
            dates = item.xpath('./span[2]/text()')
            if not dates:
                continue  # malformed row: no date cell
            date_obj = self._resolve_year(
                datetime.strptime(dates[0], '%m/%d %H:%M').replace(year=now.year), now)
            # Skip anything older than the configured window.
            if abs((date_obj.date() - now.date()).days) > self.days:
                continue
            print(f"爬取到中国经济网文章【{title}】")
            href = self.convert_to_full_url_international_economy(item.xpath('./span/a/@href')[0])
            articles.append(Article('中国经济网', title, href,
                                    date_obj.strftime('%Y-%m-%d %H:%M'), self.topic))
        self._fill_article_details(articles, start)
        print(f'中国经济网国际经济{self.topic.value}板块爬虫完毕！')


if __name__ == '__main__':
    # Ad-hoc smoke run: crawl both international sections and dump results.
    crawler = ChinaEconomicNetCrawler('http://intl.ce.cn', 7,
                                      CarTopics.INTERNATIONAL_VIEW)
    collected = []
    crawler.startCrawlInternationalNews(collected)
    crawler.startCrawlInternationalEconomy(collected)
    for entry in collected:
        print(entry)
    # Optionally render the crawl results into a document:
    # myDoc = MyDocument(collected)
    # myDoc.generate_docs()
