# _*_ coding: utf-8 _*_
# @Author   : Wei Yue
# @Time     : 2024-09-06 10:48
# @Function : 车质网  零部件页面 爬虫
import requests
from datetime import datetime, timedelta
from lxml import etree
import re
from automotiveNews.common.constant import HEADERS, CarTopics
from automotiveNews.common.model import Article, ArticleDetail
from automotiveNews.crawler.my_doucument import MyDocument
from common.utils import simplify_content


class CheZhiCrawler:
    """Crawler for the CheZhiWang (车质网) parts-news listing page.

    Collects links to articles published within the last ``days`` days from
    the listing at ``url``, then fetches each article page and attaches an
    ``ArticleDetail`` (body text + image URLs) to the ``Article``.
    """

    # Per-request timeout (seconds): without it a stalled server would
    # block the crawl indefinitely.
    REQUEST_TIMEOUT = 15

    def __init__(self, url, days, topic: CarTopics):
        self.url = url      # listing page URL to crawl
        self.days = days    # keep only articles newer than this many days
        self.topic = topic  # topic tag stored on each crawled Article

    def startCrawl(self, articles: list):
        """Append freshly crawled ``Article`` objects to *articles* in place.

        Each newly appended article also gets its ``article_detail``
        populated from the article's own page. Items already present in
        *articles* are left untouched.
        """
        print(f'车质网{self.topic.value}板块爬虫启动...')
        cnt = 0
        len_articles = len(articles)
        current_date = datetime.now()
        # `with` releases the connection even if parsing raises (the
        # original called resp.close() only on the success path).
        with requests.get(self.url, headers=HEADERS, timeout=self.REQUEST_TIMEOUT) as resp:
            # Listing pages may not declare UTF-8; mirror the encoding fix
            # the detail-page fetch below already applied.
            resp.encoding = resp.apparent_encoding
            tree = etree.HTML(resp.text)
        news_items = tree.xpath('//*[@class="news_nr"]')
        for item in news_items:
            dates = item.xpath('.//div[@class="sj"]/span/text()')
            hrefs = item.xpath('./h2/a/@href')
            titles = item.xpath('./h2/a/text()')
            # Skip malformed entries instead of crashing on [0] IndexError.
            if not (dates and hrefs and titles):
                continue
            date = dates[0]
            specific_date = datetime.strptime(date, "%Y-%m-%d %H:%M")
            if (current_date - specific_date).days > self.days:
                continue  # article is older than the requested window
            title = titles[0]
            print(f"爬取到车质网文章【{title}】")
            articles.append(Article('车质网', title, hrefs[0], date, self.topic))
            cnt += 1
        # Second pass: fetch the body of every article appended above.
        for i in range(cnt):
            article = articles[len_articles + i]
            with requests.get(article.url, headers=HEADERS, timeout=self.REQUEST_TIMEOUT) as resp:
                resp.encoding = resp.apparent_encoding
                tree = etree.HTML(resp.text)
            content_data = tree.xpath('//div[@class="show"]/p/text()')
            # Normalize the raw paragraph fragments before joining them
            # into a single newline-separated body string.
            content_data = simplify_content(content_data)
            combined_text = '\n'.join(content_data)
            img_urls = tree.xpath('//div[@class="show"]/p/img/@src')
            article.article_detail = ArticleDetail(combined_text, img_urls)
        print(f'车质网{self.topic.value}板块爬虫完毕！')


if __name__ == '__main__':
    # Crawl the parts channel for articles from the last 3 days,
    # print what was collected, then render everything into documents.
    crawler = CheZhiCrawler('https://www.12365auto.com/parts/index.shtml', 3, CarTopics.OEM)
    collected = []
    crawler.startCrawl(collected)
    for entry in collected:
        print(entry)
    MyDocument(collected).generate_docs()
