# _*_ coding: utf-8 _*_
# @Author   : Wei Yue
# @Time     : 2024-09-05 16:08
# @Function :  汽车之家 行业页面 爬虫
import requests
from datetime import datetime
from lxml import etree

from automotiveNews.common.constant import HEADERS, CarTopics
from automotiveNews.common.model import Article, ArticleDetail
from automotiveNews.crawler.my_doucument import MyDocument
from common.utils import simplify_content


class AutomotiveHomeCrawler:
    """Crawler for the Autohome (汽车之家) industry-news listing pages.

    Scrapes the article list at ``url``, keeps articles published within
    ``days`` days of today, then fetches each kept article's page to fill in
    its body text and image links.
    """

    # Network timeout in seconds; without it a stalled server would hang the
    # crawl forever (requests has no default timeout).
    REQUEST_TIMEOUT = 10

    def __init__(self, url, days, topic: CarTopics):
        # Listing-page URL to scrape.
        self.url = url
        # Maximum article age in days (articles outside this window are skipped).
        self.days = days
        # Topic tag attached to every collected Article.
        self.topic = topic

    def startCrawl(self, articles: list):
        """Crawl the listing page and append recent articles to ``articles``.

        Each appended ``Article`` has its ``article_detail`` populated with the
        combined body text and normalized image URLs of the article page.
        ``articles`` is mutated in place; nothing is returned.
        """
        print(f'汽车之家网站{self.topic.value}板块爬虫启动...')
        current_date = datetime.now().date()
        # `with` guarantees the response is released even if parsing raises
        # (the original resp.close() was skipped on exceptions).
        with requests.get(self.url, headers=HEADERS, timeout=self.REQUEST_TIMEOUT) as resp:
            tree = etree.HTML(resp.text)
        u_list_items = tree.xpath('//*[@class="u-list-item"]')
        cnt = 0
        len_articles = len(articles)
        for item in u_list_items:
            date_texts = item.xpath('./a//div[@class="more-l"]/span[1]/text()')
            hrefs = item.xpath('./a/@href')
            titles = item.xpath('./a//h3[@class="subtitle"]/text()')
            # Skip entries that do not match the expected markup instead of
            # crashing with an IndexError on [0].
            if not (date_texts and hrefs and titles):
                continue
            date = date_texts[0]
            date_obj = datetime.strptime(date, '%Y-%m-%d').date()
            # Keep only articles dated within self.days days of today
            # (abs() also admits future-dated articles, as before).
            if abs((date_obj - current_date).days) > self.days:
                continue
            # Listing hrefs are protocol-relative; prepend the scheme.
            href = 'https:' + hrefs[0]
            title = titles[0]
            print(f"爬取到汽车之家文章【{title}】")
            articles.append(Article('汽车之家', title, href, date, self.topic))
            cnt += 1
        # Fetch the detail page of each article collected above.
        for i in range(cnt):
            article = articles[len_articles + i]
            with requests.get(article.url, headers=HEADERS, timeout=self.REQUEST_TIMEOUT) as resp:
                tree = etree.HTML(resp.text)
            # Two paragraph markups are in use on article pages; grab both.
            content_data = tree.xpath(
                '//*[@id="articleContent"]//p[@data-paraid]/text() | //*[@id="articleContent"]//p[@class="editor-paragraph"]/span/text() ')
            # Merge raw text fragments into a cleaned paragraph list.
            content_data = simplify_content(content_data)
            # Drop the leading paragraph (boilerplate), then indent the new
            # first paragraph — guarded so an empty article cannot IndexError.
            content_data = content_data[1:]
            if content_data:
                content_data[0] = content_data[0] + '    '
            combined_text = '\n'.join(content_data).lstrip(']').replace('|', '')
            # Collect image links; make protocol-relative links absolute and
            # drop the first link (not an article image).
            img = tree.xpath('//*[@id="articleContent"]//p/a[@target="_blank"]/@href')
            processed_img = ['https:' + link if not link.startswith('http') else link for link in img][1:]
            # Rewrite redirect-style image URLs to the direct CDN host.
            new_urls = [u.replace('https://www.autohome.com.cn/img/?img=', 'https://www2.autoimg.cn/')
                        for u in processed_img]
            article.article_detail = ArticleDetail(combined_text, new_urls)
        print(f'汽车之家网站{self.topic.value}板块爬虫完毕！')


if __name__ == '__main__':
    # Crawl the Autohome industry-news listing for OEM-topic articles no
    # older than 14 days, then render the results into a document.
    collected = []
    crawler = AutomotiveHomeCrawler('https://www.autohome.com.cn/hangye/news/5', 14, CarTopics.OEM)
    crawler.startCrawl(collected)
    MyDocument(collected).generate_docs()
