# _*_ coding: utf-8 _*_
# @Author   : Wei Yue
# @Time     : 2024-09-26 13:12
# @Function :  微信公众号爬虫： 旺材动力总成 旺材电机与电控
import random
import time
import requests
from datetime import datetime
from lxml import etree
from automotiveNews.common.constant import HEADERS, CarTopics, WANG_CAI_DLZC, USER_AGENT_LIST, WechatPublic, \
    WANG_CAI_DJYDK, XI_MO_DJQ, GAI_SHI_QCSQ, RIO,CHUANGWEI,JISHI
from automotiveNews.common.model import Article, ArticleDetail
from automotiveNews.crawler.my_doucument import MyDocument
from common.utils import simplify_content


class WechatPublicCrawler:
    """Crawler for WeChat public-account articles via the mp.weixin.qq.com appmsg API.

    For a given account it pages through the article listing, keeps articles
    published within the last ``days`` days, then downloads each article page
    and extracts its text content and image URLs into an ``ArticleDetail``.
    """

    # Seconds before a stalled HTTP request is abandoned instead of hanging forever.
    REQUEST_TIMEOUT = 30

    def __init__(self, url, days, topic: CarTopics):
        """
        :param url: appmsg listing endpoint (e.g. https://mp.weixin.qq.com/cgi-bin/appmsg)
        :param days: only keep articles published within the last ``days`` days
        :param topic: topic section the crawled articles are filed under
        """
        self.url = url
        self.days = days
        self.topic = topic

    def startCrawlWithPublicName(self, page: int, articles: list, wechat_public: WechatPublic):
        """Crawl up to ``page`` listing pages of ``wechat_public`` and append the
        resulting ``Article`` objects (with details filled in) to ``articles``.

        :param page: number of listing pages to fetch (5 articles per page)
        :param articles: shared output list; new articles are appended in place
        :param wechat_public: account descriptor carrying cookie and query params
        """
        print(f'微信公众号{self.topic.value}板块:{wechat_public.name}爬虫启动...')
        first_new_index = len(articles)  # everything appended after this is ours
        for i in range(page):
            wechat_public.data["begin"] = i * 5  # API paginates 5 articles per page
            headers = {
                "Cookie": wechat_public.cookie,
                # Rotate the user agent to reduce the chance of being blocked.
                "User-Agent": random.choice(USER_AGENT_LIST),
            }
            # timeout= prevents the crawler from hanging on a stalled request.
            content_json = requests.get(
                self.url, headers=headers, params=wechat_public.data,
                timeout=self.REQUEST_TIMEOUT,
            ).json()
            # An expired cookie or rate limit returns a payload without this key;
            # warn and stop for this account instead of crashing with KeyError.
            msg_list = content_json.get("app_msg_list")
            if msg_list is None:
                print(f'{wechat_public.name}返回异常数据（可能是cookie失效）: {content_json}')
                break
            for item in msg_list:
                # create_time is a unix timestamp; convert to local datetime.
                specific_date = datetime.fromtimestamp(item["create_time"])
                if (datetime.now() - specific_date).days > self.days:
                    continue  # older than the crawl window
                title = item['title']
                href = item['link']
                print(f"爬取到{wechat_public.name}公众号文章【{title}】")
                articles.append(Article(
                    wechat_public.name, title, href,
                    specific_date.strftime("%Y-%m-%d %H:%M:%S"), self.topic,
                ))
            # Throttle between listing pages only — the original also slept
            # after the last page, wasting 15-25s per account.
            if page > 1 and i < page - 1:
                print('防止爬取过快而被服务器识别，等待15-25秒')
                time.sleep(random.randint(15, 25))
        # Fetch each newly-collected article page and extract text + images.
        for article in articles[first_new_index:]:
            # Context manager guarantees the connection is released even if
            # parsing below raises.
            with requests.get(article.url, headers=HEADERS, timeout=self.REQUEST_TIMEOUT) as resp:
                resp.encoding = resp.apparent_encoding
                tree = etree.HTML(resp.text)
            content_data = tree.xpath(
                '//div[@id="js_content"]//p//text()|//div[@id="js_image_content"]//text()')
            if len(content_data) < 3:
                # Some accounts wrap text in <span> instead of <p>; retry with
                # the alternate selector.
                content_data = tree.xpath(
                    '//div[@id="js_content"]//span//text()|//div[@id="js_image_content"]//text()')
            # Merge the extracted fragments into a single cleaned text body.
            combined_text = '\n'.join(simplify_content(content_data))
            img_urls = tree.xpath('//div[@id="js_content"]//img/@data-src')
            # These accounts prepend banner/QR images — presumably the first
            # two are boilerplate; drop them when enough images remain.
            if article.source in ['智驭动力系统', '旺材电机与电控'] and len(img_urls) > 3:
                img_urls = img_urls[2:]
            article.article_detail = ArticleDetail(combined_text, img_urls)
        print(f'微信公众号{self.topic.value}板块:{wechat_public.name}爬虫完毕！')


if __name__ == '__main__':
    # Crawl the last 7 days of frontier-tech articles from each configured
    # WeChat public account, then render everything into documents.
    crawler = WechatPublicCrawler('https://mp.weixin.qq.com/cgi-bin/appmsg', 7, CarTopics.FRONTIER_TECH)
    articles = []
    # 旺材动力总成 / 旺材电机与电控 / 西莫电机圈 / 盖世汽车社区 / RIO / 创维 / 极氪
    publics = (WANG_CAI_DLZC, WANG_CAI_DJYDK, XI_MO_DJQ, GAI_SHI_QCSQ, RIO, CHUANGWEI, JISHI)
    for public in publics:
        crawler.startCrawlWithPublicName(1, articles, public)
    MyDocument(articles).generate_docs()
