import re
import time
import random
import logging
import os
import datetime
import requests
import traceback
from tqdm import tqdm
from bs4 import BeautifulSoup, Comment
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.edge.options import Options
from selenium.common.exceptions import WebDriverException
from urllib.parse import urlparse

from core.base_spider import BaseSpider
from config.settings import (
    EDGE_DRIVER_PATH, HEADERS, BASE_DIR, logger
)


class MeiyeNewsSpider(BaseSpider):
    """
    Spider for magnesium-industry news published on WeChat official accounts.

    Workflow (see :meth:`crawl`): log in to the WeChat MP platform with a
    Selenium-driven Edge browser (manual QR scan), harvest the session
    cookies and an access token, resolve each followed account's ``fakeid``,
    list its recent articles, download each article body over plain HTTP and
    append the raw text to a dated file under ``BASE_DIR/yuanshineirong``.
    """

    def __init__(self):
        super().__init__()

    def _clean_text(self, text: str) -> str:
        """Collapse runs of whitespace (newlines, tabs, CR, ideographic
        space U+3000) into single spaces and strip the result.

        Returns "" for falsy input (None or empty string).
        """
        if not text:
            return ""
        text = re.sub(r'[\n\r\t\u3000]+', ' ', text)
        text = re.sub(r' +', ' ', text)
        return text.strip()

    def _fetch_article(self, url: str) -> str:
        """
        Download one article page and return its cleaned plain-text body.

        Charset handling falls back through UTF-8 -> header-declared encoding
        -> sniffed ``<meta charset>`` -> GBK, because many Chinese sites omit
        or misreport their encoding. Errors are reported *in-band* as a
        "[网络错误] ..." / "[解析错误] ..." string instead of raised, so one
        bad article cannot abort a whole crawl.
        """
        try:
            resp = requests.get(url, headers=HEADERS, timeout=15)
            resp.raise_for_status()
            try:
                html = resp.content.decode('utf-8', errors='strict')
            except UnicodeDecodeError:
                encoding = resp.encoding
                # requests reports iso-8859-1 when the server sent no
                # charset header; in that case sniff the HTML <meta charset>
                # declaration near the top of the document instead.
                if not encoding or encoding.lower() == 'iso-8859-1':
                    encoding_match = re.search(r'charset=["\']?([\w-]+)', resp.text[:1024], re.I)
                    encoding = encoding_match.group(1) if encoding_match else 'gbk'
                html = resp.content.decode(encoding, errors='ignore')
            soup = BeautifulSoup(html, 'lxml')
            # Prefer the WeChat article containers, then a generic news
            # container, and finally fall back to the whole <body>.
            article = (
                    soup.find("div", class_="rich_media_content") or
                    soup.find("div", id="js_content") or
                    soup.find("div", class_="article-content") or
                    soup.body
            )
            if not article:
                return "[错误] 未找到正文区域"
            # Drop non-text elements and HTML comments before extracting text.
            for tag in article(["script", "style", "noscript", "button", "img", "svg", "iframe", "video"]):
                tag.decompose()
            for comment in article.find_all(string=lambda text: isinstance(text, Comment)):
                comment.extract()
            text = article.get_text(separator=" ", strip=True)
            return self._clean_text(text)
        except requests.exceptions.RequestException as e:
            return f"[网络错误] {str(e)}"
        except Exception as e:
            return f"[解析错误] {str(e)}"

    def _login(self) -> dict:
        """
        Interactively log in to the WeChat MP platform and return the
        session cookies as a ``{name: value}`` dict.

        Opens Edge via Selenium, waits up to 3 minutes for the operator to
        scan the QR code (detected by the URL changing to ``/cgi-bin/home``).

        Raises:
            RuntimeError: on login timeout.
            Exception: whatever the WebDriver raises if Edge cannot start.
        """
        options = Options()
        options.add_argument('--disable-gpu')
        options.add_argument('--no-sandbox')
        options.add_argument('--disable-dev-shm-usage')
        options.add_argument('--disable-extensions')
        try:
            driver = webdriver.Edge(service=Service(EDGE_DRIVER_PATH), options=options)
        except Exception as e:
            self.logger.error(f"无法启动Edge浏览器: {str(e)}")
            raise

        try:
            driver.get('https://mp.weixin.qq.com/')
            self.logger.info('请在浏览器中扫码登录微信公众号平台...')
            start_time = time.time()
            while time.time() - start_time < 180:  # 3-minute QR-scan window
                if '/cgi-bin/home' in driver.current_url:
                    break
                time.sleep(3)
            else:
                # while/else: reached only when the loop expires without break.
                raise RuntimeError('登录超时，请重试')
            time.sleep(2)  # let post-login cookies settle before reading them
            return {c['name']: c['value'] for c in driver.get_cookies()}
        except Exception as e:
            self.logger.error(f'登录过程中出错: {str(e)}')
            raise
        finally:
            driver.quit()

    def _get_token(self, cookies: dict) -> str:
        """
        Extract the MP-platform access token from the post-login redirect.

        Hitting the MP home page with valid cookies answers 302 with a
        ``Location`` containing ``token=<digits>``; anything else means the
        cookies are stale.

        Raises:
            RuntimeError: if no redirect, empty Location, or no token param.
        """
        try:
            rsp = requests.get('https://mp.weixin.qq.com', cookies=cookies, allow_redirects=False, timeout=10)
            if rsp.status_code != 302:
                raise RuntimeError('未触发重定向，可能Cookie无效')
            location = rsp.headers.get('Location', '')
            if not location:
                raise RuntimeError('重定向地址为空')
            token_match = re.search(r'token=(\d+)', location)
            if not token_match:
                raise RuntimeError('重定向地址中未找到token参数')
            return token_match.group(1)
        except Exception as e:
            self.logger.error(f'获取token失败: {str(e)}')
            raise

    def _get_fakeid(self, token: str, cookies: dict, nickname: str) -> str:
        """
        Resolve an official account's ``fakeid`` by searching its nickname.

        Returns the ``fakeid`` of the first search hit.

        Raises:
            RuntimeError: if the API reports an error or no account matches.
        """
        try:
            params = {
                'action': 'search_biz',
                'token': token,
                'query': nickname,
                'begin': '0',
                'count': '5',
                'lang': 'zh_CN',
                'f': 'json',
                'ajax': '1',
                'random': random.random()  # cache-buster
            }
            rsp = requests.get('https://mp.weixin.qq.com/cgi-bin/searchbiz',
                               params=params,
                               cookies=cookies,
                               timeout=15)
            rsp.raise_for_status()
            data = rsp.json()
            # Surface API-level errors carried inside an HTTP-200 body.
            if isinstance(data, dict) and 'base_resp' in data:
                if data['base_resp'].get('ret') != 0:
                    raise RuntimeError(f"获取公众号列表失败: {data['base_resp'].get('err_msg', '未知错误')}")
            if 'list' not in data or not data['list']:
                raise RuntimeError(f'公众号 "{nickname}" 未找到')
            return data['list'][0]['fakeid']
        except Exception as e:
            self.logger.error(f'获取公众号 {nickname} 的fakeid失败: {str(e)}')
            raise

    def _get_wechat_articles(self, token: str, fakeid: str, cookies: dict, days=2) -> list:
        """
        List the account's recent articles published within *days* days.

        Fetches the first page (20 entries) of the account's article feed
        and keeps those whose ``update_time`` date falls on or after the
        cutoff (today minus ``days - 1``).

        Returns:
            list[dict]: ``{'title', 'url', 'pub_date'}`` per article.

        Raises:
            RuntimeError: if the API reports an error or the list is absent.
        """
        try:
            params = {
                'token': token,
                'fakeid': fakeid,
                'action': 'list_ex',
                'begin': '0',
                'count': '20',
                'type': '9',
                'lang': 'zh_CN',
                'f': 'json',
                'ajax': '1',
                'random': random.random()  # cache-buster
            }
            rsp = requests.get('https://mp.weixin.qq.com/cgi-bin/appmsg',
                               params=params,
                               cookies=cookies,
                               timeout=15)
            rsp.raise_for_status()
            data = rsp.json()
            # Surface API-level errors carried inside an HTTP-200 body.
            if isinstance(data, dict) and 'base_resp' in data:
                if data['base_resp'].get('ret') != 0:
                    raise RuntimeError(f"获取文章列表失败: {data['base_resp'].get('err_msg', '未知错误')}")
            if 'app_msg_list' not in data:
                raise RuntimeError('响应中未找到文章列表')
            # Compute the cutoff once instead of per-item (was recomputed
            # with datetime.now() inside the loop).
            cutoff = (datetime.datetime.now() - datetime.timedelta(days=days - 1)).date()
            articles = []
            for item in data['app_msg_list']:
                try:
                    pub_time = datetime.datetime.fromtimestamp(item['update_time'])
                    if pub_time.date() >= cutoff:
                        articles.append({
                            'title': item['title'],
                            'url': item['link'],
                            'pub_date': pub_time.strftime('%Y-%m-%d %H:%M')
                        })
                except KeyError:
                    # Skip malformed feed entries missing expected keys.
                    continue
            return articles
        except Exception as e:
            self.logger.error(f'获取文章列表失败: {str(e)}')
            raise

    def _summarize_with_ai(self, content):
        """
        Summarize *content* into one sentence via the DeepSeek chat API.

        Returns the summary string, or the literal "AI总结失败" on any error
        (network failure, unexpected response shape, ...). The API key is
        read from the ``DEEPSEEK_API_KEY`` environment variable when set.
        """
        try:
            # SECURITY NOTE(review): a live API key was previously hardcoded
            # here. It should be rotated and supplied ONLY via the
            # environment; the literal fallback is kept for backward
            # compatibility until that rotation happens.
            DEEPSEEK_KEY = os.environ.get("DEEPSEEK_API_KEY", "sk-d34cd6ffbfe3477b98da40cc8f8f4267")
            MODEL = "deepseek-chat"

            headers = {
                "Authorization": f"Bearer {DEEPSEEK_KEY}",
                "Content-Type": "application/json"
            }

            prompt = f"""
            你是资深编辑，请对以下内容进行总结，要求如下：
            1. 总结为一句话
            2. 保留关键信息如政策名、标准号、机构名、数字等
            3. 只输出总结结果，不添加任何额外说明
            
            内容如下：
            {content}
            """

            payload = {
                "model": MODEL,
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": 4096,
                "temperature": 0.2,  # low temperature for stable summaries
            }

            response = requests.post(
                "https://api.deepseek.com/v1/chat/completions",
                headers=headers,
                json=payload,
                timeout=180
            )
            response.raise_for_status()
            summary = response.json()["choices"][0]["message"]["content"].strip()

            return summary

        except Exception as e:
            # requests and traceback are module-level imports; the previous
            # function-local re-imports (and an unused json import) were
            # removed.
            logger.error(f"AI总结出错: {str(e)}")
            traceback.print_exc()
            return "AI总结失败"

    def _summarize_single_article(self, title, content):
        """
        Summarize one article via the Kimi (Moonshot) API.

        Uses ``self.kimi_api_key`` / ``self.model`` (presumably set by
        BaseSpider — confirm against core.base_spider). Returns the summary,
        "[未配置API密钥]" when no key is configured, or "[总结失败: ...]"
        in-band on error.
        """
        if not self.kimi_api_key:
            self.logger.error("未配置Kimi API密钥，无法进行总结")
            return "[未配置API密钥]"

        try:
            from openai import OpenAI
            client = OpenAI(
                api_key=self.kimi_api_key,
                base_url="https://api.moonshot.cn/v1/"
            )

            # BUG FIX: the previous str.format() template used the field
            # "{content[:4000]}", which str.format cannot parse (slices are
            # not valid in replacement fields), so every call raised and
            # returned "[总结失败: ...]". An f-string allows the slice
            # expression; content is capped at 4000 chars to bound prompt size.
            prompt = f"""请总结这篇文章主要内容，用一句话说明：
1. 什么事件
2. 什么单位或者什么人干了什么事
3. 产生了什么影响
4. 有什么意义

要求：
- 只输出总结内容，其余不输出
- 使用中文
- 保持简洁，不超过150字

文章标题: {title}
文章内容: {content[:4000]}"""

            response = client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": "你是一个专业的内容总结助手，专注于提炼文章核心信息。"},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.3,  # low temperature for stable, predictable output
                max_tokens=150
            )
            summary = response.choices[0].message.content.strip()
            return summary
        except Exception as e:
            self.logger.error(f"总结文章失败: {title} - {str(e)}")
            return f"[总结失败: {str(e)}]"

    def crawl(self):
        """
        Crawl recent articles from a fixed list of WeChat official accounts
        and append their raw text to ``BASE_DIR/yuanshineirong/原始内容_<date>.txt``.

        A failure for one account is logged into the output file and the
        crawl continues with the next account; only a top-level failure
        (login, token) aborts the run.

        Returns:
            None on success; ``["无"]`` on overall failure (NOTE(review):
            this asymmetric return is preserved for existing callers).
        """
        try:
            self.logger.info("开始获取镁业新闻...")
            cookies = self._login()
            self.logger.info('登录成功，获取cookies完成')
            token = self._get_token(cookies)
            self.logger.info(f'获取到token: {token}')
            nicknames = ['元镁体', '尚镁网', '府谷镁']

            # Raw-content output file, one per calendar day (append mode).
            today_date = datetime.date.today().strftime("%Y%m%d")
            out_dir = os.path.join(BASE_DIR, "yuanshineirong")
            os.makedirs(out_dir, exist_ok=True)  # first run: directory may not exist
            raw_filename = os.path.join(out_dir, f"原始内容_{today_date}.txt")
            with open(raw_filename, 'a', encoding='utf-8') as raw_file:
                raw_file.write("\n\n======= 镁业新闻原始内容 =======\n\n")

                for nickname in nicknames:
                    try:
                        self.logger.info(f'处理公众号: {nickname}')
                        fakeid = self._get_fakeid(token, cookies, nickname)
                        articles = self._get_wechat_articles(token, fakeid, cookies, days=2)
                        self.logger.info(f'找到{len(articles)}篇相关文章')
                        if not articles:
                            self.logger.warning(f'公众号 {nickname} 没有相关文章')
                            continue

                        for article in tqdm(articles, desc=f'抓取{nickname}内容'):
                            content = self._fetch_article(article['url'])
                            # Append the raw article record.
                            raw_file.write(f"【公众号】: {nickname}\n")
                            raw_file.write(f"【标题】: {article['title']}\n")
                            raw_file.write(f"【发布时间】: {article['pub_date']}\n")
                            raw_file.write(f"【URL】: {article['url']}\n")
                            raw_file.write(f"【内容】:\n{content}\n")
                            raw_file.write("-" * 80 + "\n\n")

                            # Random delay to avoid hammering the servers.
                            time.sleep(random.uniform(1, 3))
                    except Exception as e:
                        self.logger.error(f'处理公众号{nickname}时出错: {str(e)}')
                        # Record the failure in the output file too, so the
                        # day's file documents what was skipped.
                        raw_file.write(f"处理公众号{nickname}时出错: {str(e)}\n")

            self.logger.info("镁业新闻原始内容已保存")
        except Exception as e:
            self.logger.error(f'获取镁业新闻失败: {str(e)}')
            traceback.print_exc()
            return ["无"]