# _*_ coding: utf-8 _*_
# @Author   : Wei Yue
# @Time     : 2024-09-06 16:12
# @Function : 懂车帝 (Dongchedi) keyword-search news crawler
import re
from datetime import datetime
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from common.constant import SEARCHWORDS_TOPICS
from common.model import Crawler, Article, ArticleDetail
import time
from selenium.webdriver.common.by import By
from common.constant import SEARCHWORDS
from common.utils import simplify_content
from crawler.my_doucument import MyDocument


class DongCheDiSelenium(Crawler):
    """Selenium-based crawler for Dongchedi (懂车帝) keyword search.

    For each keyword it runs a site search, keeps article links published
    within the last ``days`` days, then visits each kept article to scrape
    its paragraph text and image URLs into an ``ArticleDetail``.
    """

    def __init__(self, url, days, keywords: list):
        """
        :param url: search entry page, e.g. ``https://www.dongchedi.com``
        :param days: maximum article age (in days) to keep
        :param keywords: search words; each is expected to be a key of
            ``SEARCHWORDS_TOPICS`` (used to tag the article's topic)
        """
        super().__init__(url)
        self.days = days
        self.keywords = keywords

    def startCrawl(self, articles: list):
        """Crawl lists and details, appending ``Article`` objects to *articles*.

        Mutates *articles* in place, fills each new entry's
        ``article_detail``, and releases the browser when finished.
        """
        print('懂车帝关键字爬虫开始...')
        current_date = datetime.now()
        current_year = current_date.year
        cnt = 0  # how many articles this run appended
        len_articles = len(articles)  # offset of our first article in the shared list
        for searchWord in self.keywords:
            time.sleep(1)
            print('关键字【%s】开始检索...' % searchWord)
            if self.getPageNumber() == 0:
                self.openPage(self.url)
            searchInput = self.browser.find_element(By.XPATH,
                                                    '//input[@type="text"][contains(@class,"search-form")]')
            searchInput.clear()
            searchInput.send_keys(searchWord)
            searchBtn = self.browser.find_element(By.XPATH, '//button[@type="submit"]')
            searchBtn.click()
            # The search result opens a second browser window; wait for it,
            # then switch focus to it.
            wait = WebDriverWait(self.browser, 10)
            wait.until(EC.number_of_windows_to_be(2))
            self.switchToNewWindow()
            # 获取文章列表
            print(f'正在获取关键字【{searchWord}】下的文章列表...')
            articleList = self.browser.find_elements(By.XPATH,
                                                     '//section/div[@class="common-card_wrapper__Inr_n "]')
            for article_item in articleList:
                date_elements = article_item.find_elements(By.XPATH, './/a/span')
                if len(date_elements) < 2:
                    continue
                date = date_elements[1].text
                # Age filter. Three observed date formats: "N天前" (N days ago),
                # "YYYY-MM-DD" and "MM-DD" (current or previous year).
                if '天前' in date:
                    match = re.search(r'(\d+)天', date)
                    # FIX: guard against a label with no digits — the original
                    # called match.group(1) unconditionally and crashed with
                    # AttributeError when the regex did not match.
                    if match and int(match.group(1)) > self.days:
                        continue
                elif date.count('-') >= 2:
                    specific_date = datetime.strptime(date, "%Y-%m-%d")
                    days_difference = (current_date - specific_date).days
                    if days_difference > self.days:
                        continue
                elif date.count('-') == 1:
                    specific_date = datetime.strptime(date, "%m-%d")
                    specific_date = specific_date.replace(year=current_year)
                    # FIX: year wraparound — an "MM-DD" later than today is
                    # from last year (e.g. a December article seen in January),
                    # not the future; the original produced a negative age and
                    # wrongly kept arbitrarily old articles.
                    if specific_date > current_date:
                        specific_date = specific_date.replace(year=current_year - 1)
                    days_difference = (current_date - specific_date).days
                    if days_difference > self.days:
                        continue

                link = article_item.find_element(By.XPATH, './/a[@title]').get_attribute('href')
                if 'video' in link:
                    # 跳过视频内容
                    continue
                # 获取标题
                title = article_item.find_element(By.XPATH, './/h3/a[@title]').text
                print(f"爬取到懂车帝文章【{title}】")
                articles.append(
                    Article('懂车帝', title, link, date, topic=SEARCHWORDS_TOPICS[searchWord], keyword=searchWord))
                cnt += 1
            print(f'懂车帝关键字【{searchWord}】爬取完毕')
            self.closeWindowKeepFirst()
        # Second pass: open each collected link and scrape its detail page.
        for i in range(cnt):
            print(f"正在获取文章【{articles[len_articles + i].title}】的详细内容...")
            self.openPage(articles[len_articles + i].url)
            content_data = self.browser.find_elements(By.XPATH, '//section[@id="article"]/p')
            content_data = [content.text for content in content_data]
            content_data = simplify_content(content_data)
            # Indent each paragraph by four spaces (Chinese-text convention).
            content_text = '\n'.join(['    ' + content for content in content_data])
            img_list = self.browser.find_elements(By.XPATH, '//section[@id="article"]//img')
            img_url_list = [img.get_attribute('src') for img in img_list]
            articles[len_articles + i].article_detail = ArticleDetail(content_text, img_url_list)
        print('懂车帝关键字全部爬虫完毕！')
        self.release()


if __name__ == '__main__':
    # Collect Dongchedi articles (last 30 days) for every configured search
    # word, echo them to stdout, then render the Word document.
    collected = []
    crawler = DongCheDiSelenium('https://www.dongchedi.com', 30, SEARCHWORDS)
    crawler.startCrawl(collected)
    for item in collected:
        print(item)
    MyDocument(collected).generate_docs()