import re
import os
from datetime import date
from bs4 import BeautifulSoup
from config.settings import logger, BASE_DIR
from core.base_spider import BaseSpider


class NationalNewsSpider(BaseSpider):
    """
    Spider for domestic and international news (Yicai morning report
    plus Eastmoney daily market preview).

    Scrapes both sites with Selenium, merges their sections, and appends
    the result to the day's raw-content file under BASE_DIR.
    """

    def _get_first_article_url(self):
        """
        Find today's Yicai (第一财经) morning-report article URL.

        Searches yicai.com for the keyword "早报" and scans the result
        links — first via Selenium element queries, then via a looser
        BeautifulSoup pass over the page source — for a title/href
        containing "晨报" or "早报".

        Returns:
            str | None: absolute article URL, or None when nothing
            matched (the page source is dumped to
            BASE_DIR/debug_yicai_search.html for offline debugging).
        """
        driver = self._get_driver(headless=True)
        try:
            # Search URL pre-filtered on the URL-encoded keyword "早报".
            driver.get('https://www.yicai.com/search?keys=%E6%97%A9%E6%8A%A5')
            logger.info("正在搜索第一财经晨报...")
            from selenium.webdriver.common.by import By
            from selenium.webdriver.support.ui import WebDriverWait
            from selenium.webdriver.support import expected_conditions as EC  # NOTE(review): EC is imported but never used here
            import time
            # Fixed sleep to give JS-rendered results time to appear; the
            # readyState wait below only covers the initial document load.
            time.sleep(5)
            
            # Block until the browser reports the document fully loaded.
            WebDriverWait(driver, 20).until(
                lambda d: d.execute_script("return document.readyState") == "complete"
            )
            
            # Try progressively broader selectors to collect candidate links.
            selectors = [
                "div.news-item a",
                "a[href*='/news/']",
                ".f-main-news-list a",
                ".news-list a",
                "a[href*='news']",
                "a"
            ]
            
            links = []
            for selector in selectors:
                try:
                    elements = driver.find_elements(By.CSS_SELECTOR, selector)
                    if elements:
                        links.extend(elements)
                        logger.debug(f"选择器 {selector} 找到 {len(elements)} 个元素")
                        if len(links) >= 20:  # enough candidates collected; stop broadening
                            break
                except Exception as e:
                    logger.debug(f"使用选择器 {selector} 查找元素时出错: {e}")
                    continue
                    
            if not links:
                logger.warning("未找到任何新闻链接")
                # Dump the page source for offline debugging.
                with open(os.path.join(BASE_DIR, "debug_yicai_search.html"), "w", encoding="utf-8") as f:
                    f.write(driver.page_source)
                return None
                
            logger.info(f"总共找到 {len(links)} 个链接")
                
            # Scan the collected links for a morning-report article.
            for i, link in enumerate(links[:30]):  # only inspect the first 30 links
                try:
                    title = link.text.strip()
                    href = link.get_attribute('href')
                    logger.debug(f"检查链接 {i}: 标题='{title}', 链接='{href}'")
                    if href and ('晨报' in title or '早报' in title or '早报' in href):
                        full_url = href if href.startswith('http') else 'https://www.yicai.com' + href
                        logger.info(f"找到目标文章: {title} - {full_url}")
                        return full_url
                except Exception as e:
                    logger.debug(f"检查链接 {i} 时出错: {e}")
                    continue
                    
            # No explicit morning report found; retry with a looser
            # BeautifulSoup match over the raw page source.
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            news_links = soup.find_all('a', href=True)
            logger.info(f"使用BeautifulSoup找到 {len(news_links)} 个链接")
            
            for i, link in enumerate(news_links[:50]):  # inspect the first 50 links
                try:
                    title_elem = link.find(['h2', 'h3', 'div', 'span'], string=re.compile(r'早报|晨报'))
                    link_text = link.get_text(strip=True)
                    title_attr = link.get('title', '')
                    href = link.get('href', '')
                    
                    logger.debug(f"BS检查链接 {i}: 文本='{link_text}', 标题属性='{title_attr}', 链接='{href}'")
                    
                    if title_elem or '早报' in link_text or '晨报' in link_text or '早报' in title_attr or '晨报' in title_attr:
                        if href:
                            full_url = href if href.startswith('http') else "https://www.yicai.com" + href
                            logger.info(f"找到早报文章: {link_text or title_attr} - {full_url}")
                            return full_url
                except Exception as e:
                    logger.debug(f"BS检查链接 {i} 时出错: {e}")
                    continue
                    
            logger.warning("未找到匹配的晨报/早报文章链接")
            # Dump the page source for offline debugging.
            with open(os.path.join(BASE_DIR, "debug_yicai_search.html"), "w", encoding="utf-8") as f:
                f.write(driver.page_source)
            return None
        except Exception as e:
            logger.error(f"获取第一财经晨报链接时出错: {str(e)}")
            import traceback
            traceback.print_exc()
            return None
        finally:
            driver.quit()

    def _parse_sections(self, url):
        """
        Parse the Yicai morning-report article into named sections.

        Loads `url` with Selenium, locates the article body, then walks
        its direct-child <p> tags and splits the bold (<strong>) text into
        the 今日推荐 / 观国内 / 览海外 sections delimited by 【...】 markers.

        Args:
            url (str): article URL from _get_first_article_url().

        Returns:
            dict[str, list[str]]: section name -> list of item strings;
            empty dict on failure.
        """
        driver = self._get_driver(headless=True)
        try:
            driver.get(url)
            logger.info(f"正在加载文章内容: {url}")
            from selenium.webdriver.common.by import By
            from selenium.webdriver.support.ui import WebDriverWait
            from selenium.webdriver.support import expected_conditions as EC  # NOTE(review): By/EC imported but unused here
            import time
            # Fixed sleep to let JS-rendered content settle before the
            # readyState wait below.
            time.sleep(5)
            
            # Block until the browser reports the document fully loaded.
            WebDriverWait(driver, 20).until(
                lambda d: d.execute_script("return document.readyState") == "complete"
            )
            
            # Dump the page for offline debugging.
            with open(os.path.join(BASE_DIR, "debug_yicai_article.html"), "w", encoding="utf-8") as f:
                f.write(driver.page_source)
            
            # Try progressively broader selectors to locate the article body.
            content_selectors = [
                'div#multi-text',
                '.m-txt',
                '.article-content',
                '.content',
                'div[class*="content"]',
                'div[class*="article"]',
                '[class*="content"]',
                '[class*="article"]',
                'div'
            ]
            
            content = None
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            for selector in content_selectors:
                content = soup.select_one(selector)
                if content and len(content.get_text()) > 100:  # require a minimum amount of text
                    logger.info(f"使用选择器 {selector} 找到内容区域，文本长度: {len(content.get_text())}")
                    break
                    
            if not content:
                logger.warning("未找到文章内容区域")
                # Fall back to the known legacy selectors, then <body>.
                content = soup.select_one('div#multi-text') or soup.find('div', class_='m-txt') or soup.body
                if content:
                    logger.info("使用备用方法找到内容区域")
                    
            if not content:
                logger.warning("未找到任何文章内容区域")
                return {}

            # Section delimiters as they appear in the article's bold text.
            section_titles = {
                "今日推荐": "【今日推荐】",
                "观国内": "【观国内】",
                "览海外": "【览海外】"
            }
            # Any of these markers terminates the section being collected.
            end_markers = [
                "【大公司】", "【掘金圈】", "【一财精选】",
                "【今日推荐】", "【观国内】", "【览海外】"
            ]
            sections = {}
            current_section = None
            items = []

            # Walk only direct children; item text lives in <p><strong>.
            for element in content.find_all(recursive=False):
                if element.name != "p":
                    continue

                strong_elements = element.find_all("strong")
                if strong_elements:
                    strong_text = "".join([s.get_text(strip=True) for s in strong_elements])

                    # A section-title marker starts a new section,
                    # flushing the previous one first.
                    for sec_name, sec_title in section_titles.items():
                        if sec_title in strong_text:
                            if current_section and items:
                                sections[current_section] = items.copy()
                                logger.debug(f"保存板块 {current_section}: {len(items)} 个项目")
                            current_section = sec_name
                            items = []
                            logger.debug(f"发现新板块: {current_section}")
                            break

                    # Skip everything before the first recognized section.
                    if not current_section:
                        continue

                    # Any other marker ends the current section.
                    for marker in end_markers:
                        if marker in strong_text and marker != section_titles[current_section]:
                            if items:
                                sections[current_section] = items.copy()
                                logger.debug(f"保存板块 {current_section}: {len(items)} 个项目")
                            logger.debug(f"结束板块: {current_section}")
                            current_section = None
                            items = []
                            break

                # Inside a section, the joined bold text is the item; the
                # title paragraph itself is skipped by the containment check.
                if current_section and strong_elements:
                    item_text = "".join([s.get_text(strip=True) for s in strong_elements])
                    if section_titles[current_section] not in item_text:
                        clean_text = re.sub(r'\s{2,}', ' ', item_text)
                        if clean_text:
                            items.append(clean_text)

            # Flush the trailing section, if any.
            if current_section and items:
                sections[current_section] = items
                logger.debug(f"保存最终板块 {current_section}: {len(items)} 个项目")

            logger.info(f"解析到板块: {list(sections.keys())}")
            for section, items in sections.items():
                logger.debug(f"  {section}: {len(items)} 个项目")
            return sections
        except Exception as e:
            logger.error(f"解析文章内容时出错: {str(e)}")
            import traceback
            traceback.print_exc()
            return {}
        finally:
            driver.quit()

    def _get_eastmoney_content(self):
        """
        Fetch today's Eastmoney (东方财富) daily-preview article sections.

        Opens the Eastmoney list page, picks the article whose link text
        contains today's date ("M月D日"), then splits the article body on
        <h3> headings. Only the 每日精选 / 热点题材 / 商品期货 sections
        (or headings containing those strings) are kept.

        Returns:
            dict[str, list[str]]: heading -> paragraph texts; empty dict
            when no article is found or an error occurs.
        """
        driver = self._get_driver(headless=True)
        try:
            from selenium.webdriver.common.by import By
            import time
            import datetime
            
            LIST_URL = "https://stock.eastmoney.com/a/czpnc.html"
            # e.g. "3月5日" — used to match today's article in the list.
            TODAY_STR = f"{datetime.date.today().month}月{datetime.date.today().day}日"
            driver.get(LIST_URL)
            logger.info(f"正在访问东方财富网: {LIST_URL}")
            # Fixed sleep for JS-rendered list items.
            time.sleep(5)
            
            # Block until the browser reports the document fully loaded.
            from selenium.webdriver.support.ui import WebDriverWait
            WebDriverWait(driver, 20).until(
                lambda d: d.execute_script("return document.readyState") == "complete"
            )
            
            # Dump the list page for offline debugging.
            with open(os.path.join(BASE_DIR, "debug_eastmoney_list.html"), "w", encoding="utf-8") as f:
                f.write(driver.page_source)
            
            sections = {}
            # Try progressively broader selectors for article links.
            selectors = [
                ".repeatList a", ".Zt a", ".newsList a", ".list a", 
                "a[href*='/a/']", "ul.newslist a", ".list ul a", "a"
            ]
            
            links = []
            for selector in selectors:
                try:
                    elements = driver.find_elements(By.CSS_SELECTOR, selector)
                    if elements:
                        links.extend(elements)
                        logger.debug(f"东方财富选择器 {selector} 找到 {len(elements)} 个元素")
                        if len(links) >= 30:
                            break
                except Exception as e:
                    logger.debug(f"东方财富使用选择器 {selector} 时出错: {e}")
                    continue
            
            logger.info(f"东方财富总共找到 {len(links)} 个链接")
            
            # Find the first link whose text mentions today's date.
            em_url = ""
            for i, a in enumerate(links[:30]):
                try:
                    text = a.text
                    href = a.get_attribute("href")
                    logger.debug(f"检查东方财富链接 {i}: 文本='{text}', 链接='{href}'")
                    if TODAY_STR in text and href:
                        em_url = href if href.startswith("http") else "https:" + href
                        logger.info(f"找到东方财富文章: {text} - {em_url}")
                        break
                except Exception as e:
                    logger.debug(f"检查东方财富链接 {i} 时出错: {e}")
                    continue
                    
            if not em_url:
                logger.warning("未找到东方财富今日文章")
                return sections
                
            driver.get(em_url)
            time.sleep(3)
            
            # Block until the article page reports fully loaded.
            WebDriverWait(driver, 20).until(
                lambda d: d.execute_script("return document.readyState") == "complete"
            )
            
            # Dump the article page for offline debugging.
            with open(os.path.join(BASE_DIR, "debug_eastmoney_article.html"), "w", encoding="utf-8") as f:
                f.write(driver.page_source)
                
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            
            # Try progressively broader selectors for the article body.
            body_selectors = [".newsContent", "#ContentBody", ".article-body", ".Body", ".content", "div"]
            body = None
            for selector in body_selectors:
                body = soup.select_one(selector)
                if body and len(body.get_text()) > 100:
                    logger.info(f"东方财富使用选择器 {selector} 找到正文区域")
                    break
            
            if not body:
                body = soup
                logger.warning("东方财富未找到明确正文区域，使用整个页面")
                
            # Split the body on <h3> headings; paragraphs before the first
            # heading land in the catch-all "正文" bucket.
            sections_dict, cur = {}, "正文"
            sections_dict[cur] = []
            
            if hasattr(body, 'children'):
                for tag in body.children:
                    if hasattr(tag, 'name'):
                        if tag.name == "h3":
                            cur = tag.get_text(strip=True)
                            sections_dict[cur] = []
                            logger.debug(f"东方财富发现新标题: {cur}")
                        elif tag.name == "p":
                            # Strip markup/whitespace; drop bare "N. xxx："
                            # label-only lines.
                            txt = re.sub(r'<[^>]+>', '', str(tag))
                            txt = re.sub(r'\s+', ' ', txt).strip()
                            if txt and not re.match(r'^\d+\.\s*[\u4e00-\u9fa5]+：$', txt):
                                sections_dict[cur].append(txt)

            # Keep only the sections of interest.
            keep_titles = ["每日精选", "热点题材", "商品期货"]
            sections = {k: v for k, v in sections_dict.items() if k in keep_titles or any(kt in k for kt in keep_titles)}
            logger.info(f"从东方财富获取到板块: {list(sections.keys())}")
            for section, items in sections.items():
                logger.debug(f"  东方财富 {section}: {len(items)} 个项目")
                
            return sections
        except Exception as e:
            logger.error(f"访问东方财富时出错: {str(e)}")
            import traceback
            traceback.print_exc()
            return {}
        finally:
            driver.quit()

    def _merge_and_deduplicate(self, yicai, eastmoney):
        """
        Merge the Yicai and Eastmoney section dicts and drop duplicates.

        Yicai titles are normalized onto canonical section keys (今日推荐 /
        观国内 / 览海外 / 每日精选 / 热点题材 / 商品期货); unrecognized
        titles are kept unchanged. Eastmoney sections are merged in,
        appending to sections that already exist. Within each resulting
        section, exact duplicate items are removed while preserving
        first-occurrence order.

        Fixes vs. the previous version: the promised deduplication is
        actually performed; the caller's input lists are never mutated
        (the old code extend()-ed aliased lists from `yicai`); two Yicai
        titles mapping to the same canonical key are now merged instead of
        the later one silently overwriting the earlier one.

        Args:
            yicai (dict[str, list[str]]): sections parsed from Yicai.
            eastmoney (dict[str, list[str]]): sections from Eastmoney.

        Returns:
            dict[str, list[str]]: merged, deduplicated sections.
        """
        def _canonical(title):
            # Map a raw Yicai title onto its canonical section key.
            if '今日推荐' in title:
                return '今日推荐'
            if '观国内' in title:
                return '观国内'
            if '览海外' in title or '海外' in title:
                return '览海外'
            if '精选' in title:
                return '每日精选'
            if '热点' in title:
                return '热点题材'
            if '期货' in title:
                return '商品期货'
            return title  # keep unrecognized titles as-is

        merged = {}
        for title, content in yicai.items():
            # setdefault+extend copies into a fresh list, so the caller's
            # lists are never aliased or mutated.
            merged.setdefault(_canonical(title), []).extend(content)

        for title, content in eastmoney.items():
            if title in merged:
                merged[title].extend(content)
                logger.debug(f"合并内容到已有板块 {title}，新增 {len(content)} 个项目")
            else:
                merged[title] = list(content)
                logger.debug(f"新增板块 {title}，包含 {len(content)} 个项目")

        # Deduplicate each section, preserving first-seen order
        # (dict.fromkeys keeps insertion order).
        merged = {k: list(dict.fromkeys(v)) for k, v in merged.items()}

        logger.info(f"合并后板块: {list(merged.keys())}")
        for section, items in merged.items():
            logger.info(f"  合并后 {section}: {len(items)} 个项目")
        return merged

    def _summarize_with_ai(self, raw_filename):
        """
        Summarize the raw news file with the DeepSeek chat API.

        Reads `raw_filename`, sends its full text to DeepSeek with an
        editorial prompt, saves the reply next to the input (filename
        marker "原始内容" -> "AI总结结果") and parses the reply into
        numbered items under two headings.

        Args:
            raw_filename (str): path to the raw-content text file.

        Returns:
            dict[str, list[str]] | None:
            {"国内新闻": [...], "国际新闻": [...]} on success, None on any
            error (logged with traceback).
        """
        try:
            import requests

            # Read the raw content to be summarized.
            with open(raw_filename, 'r', encoding='utf-8') as f:
                content = f.read()

            # SECURITY: prefer the DEEPSEEK_API_KEY environment variable.
            # The hardcoded fallback key is committed to source control and
            # should be rotated and removed.
            DEEPSEEK_KEY = os.environ.get(
                "DEEPSEEK_API_KEY", "sk-d34cd6ffbfe3477b98da40cc8f8f4267"
            )
            MODEL = "deepseek-chat"
            
            headers = {
                "Authorization": f"Bearer {DEEPSEEK_KEY}",
                "Content-Type": "application/json"
            }
            
            prompt = f"""
            你是资深财经编辑，请对以下事件进行总结，要求如下：
            1. 事件粒度：
               - 【今日推荐】【观国内】【览海外】板块已按冒号拆分成独立事件，每个事件单独一行，直接保留无需总结
               - 【每日精选】【热点题材】【商品期货】板块需要逐条总结，每条一句话，保留日期/数字/机构/人名
            2. 去重：语义完全重复的内容只保留首次出现的
            3. 格式要求：
            国内新闻
            1. 事件1
            2. 事件2
            …
            
            国际新闻
            1. 事件1
            2. 事件2
            …
            4. 严格按上述格式输出，只输出结果，不添加任何额外说明
            
            内容如下：
            {content}
            """
            
            payload = {
                "model": MODEL,
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": 4096,
                "temperature": 0.2,
            }
            
            response = requests.post(
                "https://api.deepseek.com/v1/chat/completions",
                headers=headers,
                json=payload,
                timeout=180
            )
            response.raise_for_status()
            summary = response.json()["choices"][0]["message"]["content"].strip()
            
            # Derive the output path. Guard against a filename missing the
            # expected marker: .replace() would then be a no-op and the
            # write below would clobber the input file.
            summary_filename = raw_filename.replace("原始内容", "AI总结结果")
            if summary_filename == raw_filename:
                root, ext = os.path.splitext(raw_filename)
                summary_filename = f"{root}_AI总结结果{ext}"
            with open(summary_filename, 'w', encoding='utf-8') as f:
                f.write(summary)
                
            logger.info(f"AI总结完成，结果保存至: {summary_filename}")
            
            # Parse the model output into the two expected buckets.
            final = {"国内新闻": [], "国际新闻": []}
            cur = None
            for line in summary.splitlines():
                line = line.strip()
                if line == "国内新闻":
                    cur = "国内新闻"
                elif line == "国际新闻":
                    cur = "国际新闻"
                elif cur and line and re.match(r'^\d+\.', line):
                    # Keep only numbered lines, stripping the "N." prefix.
                    final[cur].append(re.sub(r'^\d+\.\s*', '', line))
            
            return final
            
        except Exception as e:
            logger.error(f"AI总结出错: {str(e)}")
            import traceback
            traceback.print_exc()
            return None

    def _today_raw_path(self):
        """Return today's raw-content file path, creating its directory.

        Fix: the 'yuanshineirong' directory is created on demand so the
        first run on a clean checkout does not fail when opening the file.
        """
        out_dir = os.path.join(BASE_DIR, "yuanshineirong")
        os.makedirs(out_dir, exist_ok=True)
        return os.path.join(out_dir, f"原始内容_{date.today().strftime('%Y%m%d')}.txt")

    @staticmethod
    def _merge_short_items(plist):
        """Glue short heading-like items onto the item that follows them.

        An item shorter than 50 chars that does not end with sentence
        punctuation (。？！) is treated as a heading and joined to the
        next item, inserting '：' unless the heading already ends with a
        separator character.
        """
        merged_items = []
        i = 0
        while i < len(plist):
            cur = plist[i]
            if i + 1 < len(plist) and len(cur) < 50 and not re.search(r'[。？！]$', cur):
                sep = '' if cur.endswith(('：', ':', '。', '!', '?')) else '：'
                merged_items.append(f"{cur}{sep}{plist[i + 1]}")
                i += 2
            else:
                merged_items.append(cur)
                i += 1
        return merged_items

    def crawl(self):
        """
        Crawl domestic/international news and append it to today's main
        raw-content file (BASE_DIR/yuanshineirong/原始内容_YYYYMMDD.txt).

        Even when the Yicai link cannot be resolved, a failure note is
        written so the day's file records the attempt.
        """
        logger.info("开始获取国内/国际新闻...")
        raw_filename = self._today_raw_path()

        yicai_url = self._get_first_article_url()
        if not yicai_url:
            logger.error("未能获取第一财经晨报链接")
            # Record the failure in the main file as well.
            with open(raw_filename, 'a', encoding='utf-8') as f:
                f.write("\n\n======= 国内国际新闻原始内容 =======\n\n")
                f.write("未能获取第一财经晨报链接\n")
            return
            
        logger.info(f"第一财经晨报链接: {yicai_url}")
        yicai_content = self._parse_sections(yicai_url)

        # Eastmoney's article for today.
        eastmoney_content = self._get_eastmoney_content()

        merged = self._merge_and_deduplicate(yicai_content, eastmoney_content)
        
        # Append straight into the main raw-content file (no temp files).
        with open(raw_filename, 'a', encoding='utf-8') as f:
            f.write("\n\n======= 国内国际新闻原始内容 =======\n\n")
            for title, plist in merged.items():
                f.write(f"\n【{title}】\n")
                for idx, item in enumerate(self._merge_short_items(plist), 1):
                    f.write(f"{idx}. {item}\n")
                f.write("\n")
        
        logger.info("国内国际新闻原始内容已保存")
        logger.info(f"获取到的板块: {list(merged.keys())}")
        for section, items in merged.items():
            logger.info(f"  最终 {section}: {len(items)} 个项目")