# _*_ coding: utf-8 _*_
# @Author   : Wei Yue
# @Time     : 2024-09-06 13:10
# @Function : 太平洋汽车  技术快讯 爬虫
import requests
from datetime import datetime
from lxml import etree
from automotiveNews.common.constant import HEADERS, CarTopics
from automotiveNews.common.model import Article, ArticleDetail
from automotiveNews.crawler.my_doucument import MyDocument
from common.utils import simplify_content


class PacificAutoCrawler:
    """Crawler for the Pacific Auto (pcauto.com.cn) tech-news listing pages.

    Scrapes a listing page, keeps articles published within the last
    ``days`` days, then fetches each article body and attaches an
    ``ArticleDetail`` to the corresponding ``Article``.
    """

    # Seconds to wait for any single HTTP request; without a timeout a
    # stalled server would hang the crawler indefinitely.
    REQUEST_TIMEOUT = 15

    def __init__(self, url, days, topic: CarTopics):
        """
        :param url:   listing-page URL to crawl
        :param days:  only keep articles published within this many days
        :param topic: CarTopics value tagged onto every collected Article
        """
        self.url = url
        self.days = days
        self.topic = topic

    def startCrawl(self, articles: list):
        """Crawl the listing page and append Article objects to *articles*.

        Mutates *articles* in place; entries already present are untouched.
        Each appended Article gets its ``article_detail`` filled in.
        """
        print(f'太平洋汽车{self.topic.value}板块爬虫启动...')
        cnt = 0
        len_articles = len(articles)
        current_date = datetime.now()
        resp = requests.get(self.url, headers=HEADERS,
                            timeout=self.REQUEST_TIMEOUT)
        # The listing page is served GB-encoded; apparent_encoding proved
        # unreliable here, so the charset is pinned explicitly.
        resp.encoding = 'gb2312'
        tree = etree.HTML(resp.text)
        news_items = tree.xpath('//div[@class="pic-txt clearfix"]')
        resp.close()
        for item in news_items:
            dates = item.xpath('.//span[@class="time"]/text()')
            hrefs = item.xpath('./a/@href')
            titles = item.xpath('./div[@class="txt"]/p/a/text()')
            # Skip malformed listing entries instead of crashing with
            # IndexError and losing the rest of the page.
            if not (dates and hrefs and titles):
                continue
            date = dates[0]
            specific_date = datetime.strptime(date, "%Y-%m-%d")
            days_difference = (current_date - specific_date).days
            if days_difference > self.days:
                continue  # older than the requested window
            # Listing hrefs are protocol-relative (//www.pcauto.com.cn/...).
            href = 'https://' + hrefs[0].lstrip('/')
            title = titles[0]
            print(f"爬取到太平洋汽车文章【{title}】")
            articles.append(Article('太平洋汽车', title, href, date, self.topic))
            cnt += 1
        # Second pass: fetch the body of every article collected above.
        for i in range(cnt):
            article = articles[len_articles + i]
            resp = requests.get(article.url, headers=HEADERS,
                                timeout=self.REQUEST_TIMEOUT)
            resp.encoding = resp.apparent_encoding
            tree = etree.HTML(resp.text)
            content_data = tree.xpath(
                '//div[@class="artText clearfix"]/p/text()')
            # Normalize paragraphs, then merge into one newline-joined string.
            content_data = simplify_content(content_data)
            combined_text = '\n'.join(content_data)
            # Image anchors embed the real image URL as the first query
            # parameter; strip everything else.
            img_urls = tree.xpath(
                '//div[@class="artText clearfix"]//a[not(@class)]/@href')
            cleaned_urls = ['https:' + url.split('?')[1].split('&')[0]
                            for url in img_urls if '?' in url]
            resp.close()
            article.article_detail = ArticleDetail(combined_text, cleaned_urls)
        print(f'太平洋汽车{self.topic.value}板块爬虫完毕！')


if __name__ == '__main__':
    # Crawl the last 7 days of the frontier-tech news feed and print
    # every collected article.
    crawler = PacificAutoCrawler(
        'http://www.pcauto.com.cn/tech/kuaixun/', 7, CarTopics.FRONTIER_TECH)
    collected = []
    crawler.startCrawl(collected)
    for entry in collected:
        print(entry)
    # Word-document export, currently disabled:
    # MyDocument(collected).generate_docs()
