# _*_ coding: utf-8 _*_
# @Author   : Wei Yue
# @Time     : 2024-09-05 16:28
# @Function : 易车网 技术页面爬虫
import requests
from datetime import datetime, timedelta
from lxml import etree
import re
from automotiveNews.common.constant import HEADERS, CarTopics
from automotiveNews.common.model import Article, ArticleDetail
from automotiveNews.crawler.my_doucument import MyDocument
from common.utils import simplify_content


class YiCheCrawler():
    """Crawler for a section of news.yiche.com (易车网).

    Scrapes the listing page at ``url`` for articles published within the
    last ``days`` days, appends them to a caller-supplied list as ``Article``
    objects, then fetches each article page to fill in body text and image
    URLs (``ArticleDetail``).
    """

    def __init__(self, url, days, topic: CarTopics):
        # url:   listing page to crawl, e.g. 'https://news.yiche.com/jishu/'
        # days:  only keep articles published within this many days
        # topic: project-defined topic tag attached to each Article
        self.url = url
        self.days = days
        self.topic = topic

    def _is_recent(self, date_text, now):
        """Return True if a human-readable date string falls within self.days.

        The site renders dates in three formats:
          * relative: '3天前', '5小时前', '刚刚'  (contains '前')
          * full:     '2023年12月30日'           (contains '年')
          * short:    '12月30日'                 (year omitted)
        """
        if '前' in date_text:
            m = re.search(r'(\d+)天', date_text)
            # '5小时前' / '刚刚' carry no day count -> published today, keep.
            # (The original code crashed with AttributeError on m.group(1)
            # because m is None for those strings.)
            return m is None or int(m.group(1)) <= self.days
        if '年' in date_text:
            published = datetime.strptime(date_text, "%Y年%m月%d日")
        else:
            published = datetime.strptime(date_text, "%m月%d日").replace(year=now.year)
            # A month/day later than today must belong to the previous year
            # (e.g. '12月30日' seen in January). Without this correction the
            # difference goes negative and old articles slip through the
            # `> self.days` filter.
            if published > now:
                published = published.replace(year=now.year - 1)
        return (now - published).days <= self.days

    def startCrawl(self, articles: list):
        """Crawl the listing page and append recent articles (with details).

        Mutates ``articles`` in place; returns None.
        """
        print(f'易车网站{self.topic.value}板块爬虫启动...')
        now = datetime.now()
        start = len(articles)
        # 'with' releases the connection even if parsing raises.
        with requests.get(self.url, headers=HEADERS) as resp:
            tree = etree.HTML(resp.text)
            news_items = tree.xpath('//*[@class="news-item"]')
        for item in news_items:
            dates = item.xpath('./a//span[@class="time-text"]/text()')
            hrefs = item.xpath('./a/@href')
            titles = item.xpath('./a//h3[@class="title"]/text()')
            # Skip malformed items instead of raising IndexError on [0].
            if not (dates and hrefs and titles):
                continue
            date = dates[0]
            if not self._is_recent(date, now):
                continue
            href = 'https://news.yiche.com' + hrefs[0]
            title = titles[0]
            print(f"爬取到易车文章【{title}】")
            articles.append(Article('易车', title, href, date, self.topic))
        # Second pass: fetch body text and images for each new article.
        for article in articles[start:]:
            with requests.get(article.url, headers=HEADERS) as resp:
                tree = etree.HTML(resp.text)
            paragraphs = tree.xpath(
                '//div[@class="news-detail-main motu_cont"]/p/text()')
            # Normalize the paragraphs, then join with a 4-space indent
            # at the start of each one.
            paragraphs = simplify_content(paragraphs)
            body = '\n'.join('    ' + text for text in paragraphs)
            img_urls = tree.xpath(
                '//div[@class="news-detail-main motu_cont"]/p/img/@data-original')
            article.article_detail = ArticleDetail(body, img_urls)
        print(f'易车网站{self.topic.value}板块爬虫完毕！')


if __name__ == '__main__':
    # Crawl the last 14 days of the tech section and render the results.
    collected = []
    crawler = YiCheCrawler('https://news.yiche.com/jishu/', 14, topic=CarTopics.FRONTIER_TECH)
    crawler.startCrawl(collected)
    doc = MyDocument(collected)
    doc.generate_docs()
