import requests
import json
import time
import random
from datetime import datetime
from bs4 import BeautifulSoup
import re
from urllib.parse import urljoin
from loguru import logger
from typing import List, Dict, Optional, Any

class BaseParser:
    """Base class for Eastmoney finance news/announcement parsers.

    Owns a shared HTTP session with browser-like headers and implements a
    layered parsing strategy (JSON embedded in <script> tags -> HTML tables
    -> free page text). Subclasses must implement `_extract_from_element`
    and `_convert_data_to_news`, and may override the pattern/selector/
    relevance hooks.
    """

    def __init__(self):
        # One session so headers and cookies persist across requests.
        self.session = requests.Session()
        self.base_url = "https://finance.eastmoney.com"

        # Common request headers (browser-like fingerprint); applied to the
        # session so every request carries them.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        }
        self.session.headers.update(self.headers)

    def get_news_list(self, tag: str = "", page: int = 1) -> List[Dict]:
        """Fetch one list page and parse it into news items.

        Args:
            tag: optional tag/keyword appended to the query string.
            page: 1-based page number.

        Returns:
            Parsed news items; an empty list on any failure (never raises).
        """
        try:
            url = self._build_url(tag, page)
            response = self.session.get(url, timeout=15)
            if response.status_code == 200:
                return self._parse_page(response.text, tag)
            # Non-200 responses were previously dropped silently; log them
            # so failures are diagnosable.
            logger.debug(f"获取新闻数据失败: HTTP {response.status_code}")
        except Exception as e:
            logger.error(f"获取新闻数据失败: {str(e)}")
        return []

    def _build_url(self, tag: str, page: int) -> str:
        """Build the list-page URL for the given tag/page."""
        url = f"{self.base_url}?page={page}"
        if tag:
            url += f"&tag={tag}"
        return url

    def _parse_page(self, html_content: str, keyword: str = "") -> List[Dict]:
        """Parse a list page, trying strategies in order of reliability.

        Order: JSON embedded in <script> tags, then HTML tables, then free
        page text. The first strategy yielding items wins (capped at 20).

        Returns:
            Parsed items; empty list when nothing could be extracted.
        """
        try:
            soup = BeautifulSoup(html_content, 'html.parser')
            news_list = []

            logger.info("正在解析页面...")

            # Log the announcement title/date found on the page for context.
            page_title, page_date = self._extract_page_title_and_date(soup)
            logger.info(f"从页面提取到标题: {page_title}, 日期: {page_date}")

            # Strategy 1: JSON payloads embedded in script tags.
            json_data = self._extract_json_from_scripts(soup)
            if json_data:
                news_list = self._convert_json_to_news(json_data)
                if news_list:
                    return news_list[:20]  # cap result size

            # Strategy 2: structured HTML tables.
            news_list = self._parse_html_tables(soup)
            if news_list:
                return news_list[:20]

            # Strategy 3: free-form page text (subclass hook; default empty).
            news_list = self._extract_from_page_text(soup)

            logger.info(f"从页面解析到 {len(news_list)} 条数据")
            return news_list

        except Exception as e:
            logger.error(f"解析页面失败: {str(e)}")
            return []

    def _extract_json_from_scripts(self, soup) -> List[Dict]:
        """Scan every <script> for embedded JSON; return the first hit."""
        script_tags = soup.find_all('script')
        for script in script_tags:
            if script.string:
                json_patterns = self._get_json_patterns()
                for pattern in json_patterns:
                    matches = re.findall(pattern, script.string, re.DOTALL)
                    for match in matches:
                        try:
                            json_data = self._parse_json_match(match, pattern)
                            if json_data:
                                return json_data
                        except Exception as e:
                            logger.debug(f"解析JSON数据失败: {str(e)}")
                            continue
        return []

    def _get_json_patterns(self) -> List[str]:
        """Regexes locating JSON arrays inside scripts - subclasses may override."""
        return [
            r'var\s+(?:data|list|reportData)\s*=\s*(\[.*?\]);',
            r'"(?:data|list|reportList)"\s*:\s*(\[.*?\])',
            r'reportData\s*[=:]\s*(\[.*?\])',
        ]

    def _parse_json_match(self, match: str, pattern: str) -> List[Dict]:
        """Decode one regex match into a list of records ([] on failure)."""
        try:
            cleaned_match = self._clean_json_string(match)
            json_data = json.loads(cleaned_match)

            if isinstance(json_data, list) and len(json_data) > 0:
                logger.info(f"从脚本中提取到 {len(json_data)} 条数据")
                return json_data
            elif isinstance(json_data, dict):
                return self._extract_nested_data(json_data)
        except Exception as e:
            logger.debug(f"JSON解析失败: {str(e)}")
        return []

    def _clean_json_string(self, json_str: str) -> str:
        """Normalize near-JSON scraped from scripts into strict JSON."""
        cleaned = json_str.strip()
        # Remove trailing commas before } or ] (invalid in strict JSON).
        cleaned = re.sub(r',(\s*[}\]])', r'\1', cleaned)
        # Drop newlines and tabs.
        cleaned = cleaned.replace('\n', '').replace('\r', '').replace('\t', '')
        return cleaned

    def _extract_nested_data(self, json_data: Dict) -> List[Dict]:
        """Unwrap the `list -> data -> list` nesting seen in some payloads."""
        if 'list' in json_data:
            list_data = json_data['list']
            if isinstance(list_data, dict) and 'data' in list_data:
                data_list = list_data['data']
                if isinstance(data_list, dict) and 'list' in data_list:
                    actual_data = data_list['list']
                    if isinstance(actual_data, list) and len(actual_data) > 0:
                        logger.info(f"从嵌套结构中提取到 {len(actual_data)} 条数据")
                        return actual_data
        return []

    def _parse_html_tables(self, soup) -> List[Dict]:
        """Try each CSS selector in turn; return items from the first that works."""
        selectors = self._get_table_selectors()
        for selector in selectors:
            try:
                elements = soup.select(selector)
                if elements:
                    logger.info(f"使用选择器 '{selector}' 找到 {len(elements)} 个元素")
                    news_list = []
                    for element in elements[:30]:  # cap rows scanned per selector
                        news_item = self._extract_from_element(element)
                        if news_item and news_item.get('title'):
                            news_list.append(news_item)

                    if news_list:
                        logger.info(f"使用选择器 '{selector}' 成功解析到 {len(news_list)} 条数据")
                        return news_list

            except Exception as e:
                logger.debug(f"选择器 {selector} 失败: {str(e)}")
                continue
        return []

    def _get_table_selectors(self) -> List[str]:
        """CSS selectors for table rows - subclasses may override."""
        return [
            'table tbody tr',
            '.dataview tbody tr',
            '.table-responsive tbody tr',
            '[data-code]',
        ]

    def _extract_from_element(self, element) -> Dict:
        """Extract one news item from a DOM element - subclasses must implement."""
        raise NotImplementedError("子类必须实现 _extract_from_element 方法")

    def _extract_from_page_text(self, soup) -> List[Dict]:
        """Last-resort extraction from raw page text - subclasses may override."""
        return []

    def _convert_json_to_news(self, json_data: List[Dict]) -> List[Dict]:
        """Convert raw JSON records to news items via the subclass converter."""
        news_list = []
        for item in json_data:
            if isinstance(item, dict):
                news_item = self._convert_data_to_news(item)
                if news_item:
                    news_list.append(news_item)
        return news_list

    def _convert_data_to_news(self, data_item: Dict) -> Dict:
        """Convert one raw record to the news format - subclasses must implement."""
        raise NotImplementedError("子类必须实现 _convert_data_to_news 方法")

    def _extract_page_title_and_date(self, soup) -> tuple:
        """Best-effort extraction of the announcement title and date.

        Tries <title>, h1-h6, strong/b, then link texts for a relevant title;
        the date is taken from the title when possible, else from the whole
        page text.

        Returns:
            (title, date) - either may be '' when nothing relevant is found.
        """
        title = ''
        date = ''

        try:
            # 1. Page <title>.
            page_title = soup.find('title')
            if page_title:
                title_text = page_title.get_text(strip=True)
                if self._is_relevant_title(title_text):
                    title = title_text

            # 2. Heading tags h1-h6.
            if not title:
                for tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
                    title_elem = soup.find(tag)
                    if title_elem:
                        title_text = title_elem.get_text(strip=True)
                        if len(title_text) > 5 and self._is_relevant_title(title_text):
                            title = title_text
                            break

            # 3. Emphasis tags strong/b.
            if not title:
                for tag in ['strong', 'b']:
                    strong_elems = soup.find_all(tag)
                    for elem in strong_elems:
                        title_text = elem.get_text(strip=True)
                        if len(title_text) > 5 and self._is_relevant_title(title_text):
                            title = title_text
                            break
                    if title:
                        break

            # 4. Link texts (longer threshold: links tend to be noisy).
            if not title:
                links = soup.find_all('a')
                for link in links:
                    link_text = link.get_text(strip=True)
                    if len(link_text) > 10 and self._is_relevant_title(link_text):
                        title = link_text
                        break

            # 5. Date: prefer the title, fall back to the full page text.
            date = self._extract_date_from_text(title)
            if not date:
                page_text = soup.get_text()
                date = self._extract_date_from_text(page_text)

        except Exception as e:
            logger.debug(f"提取页面标题和日期失败: {str(e)}")

        return title, date

    def _is_relevant_title(self, title_text: str) -> bool:
        """Heuristic relevance check by keyword - subclasses may override."""
        keywords = ['公告', '报告', '预告', '重组', '年报']
        return any(keyword in title_text for keyword in keywords)

    def _extract_date_from_text(self, text: str) -> str:
        """Return the first date-like substring in *text*, or ''."""
        date_patterns = [
            r'(\d{4}-\d{2}-\d{2})',
            r'(\d{4}/\d{2}/\d{2})',
            r'(\d{4}年\d{1,2}月\d{1,2}日)',
            r'(\d{1,2}月\d{1,2}日)',
            r'(\d{1,2}-\d{1,2})'
        ]

        for pattern in date_patterns:
            match = re.search(pattern, text)
            if match:
                return match.group(1)
        return ''

    def _format_title(self, stock_code: str, stock_name: str, content: str) -> str:
        """Format a title as "code name - content", omitting missing parts."""
        if stock_code and stock_name:
            return f"{stock_code} {stock_name} - {content}"
        elif stock_name:
            return f"{stock_name} - {content}"
        else:
            return content

    def _extract_stock_info_from_cells(self, cell_texts: List[str]) -> tuple:
        """Find (stock_name, stock_code) in a row of table-cell texts.

        The code is a cell that is exactly 6 digits; the name is the short
        (<10 chars) cell immediately before or after it.

        Returns:
            (stock_name, stock_code) - ('', '') when no code cell is found.
        """
        stock_name = ''
        stock_code = ''

        for i, text in enumerate(cell_texts):
            # fullmatch: the cell must be exactly 6 digits, so longer numbers
            # (dates, timestamps) are not mistaken for stock codes.
            if re.fullmatch(r'\d{6}', text):
                stock_code = text
                # Stock name is usually the short cell adjacent to the code.
                if i > 0 and len(cell_texts[i-1]) < 10:
                    stock_name = cell_texts[i-1]
                elif i + 1 < len(cell_texts) and len(cell_texts[i+1]) < 10:
                    stock_name = cell_texts[i+1]
                break

        return stock_name, stock_code

    def _request_json(self, url: str, params: Optional[Dict[str, Any]] = None,
                     referer: Optional[str] = None, timeout: int = 15, max_retries: int = 3):
        """Unified JSON request helper.

        - Sends common Eastmoney headers (User-Agent/Accept/Referer/Accept-Language)
        - Reuses the session, so current cookies are carried automatically
        - Retries with exponential backoff
        - Unwraps JSONP (strips the `callbackName(...)` wrapper)
        - Returns the decoded dict/list, or None once retries are exhausted.
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Referer': referer or 'https://data.eastmoney.com/',
            'Connection': 'keep-alive',
        }
        delay = 0.6
        last_err = None
        for attempt in range(1, max_retries + 1):
            try:
                resp = self.session.get(url, params=params, headers=headers, timeout=timeout)
                status = resp.status_code
                if status != 200:
                    logger.debug(f"请求失败 status={status}, url={resp.url}")
                    raise RuntimeError(f"HTTP {status}")
                text = resp.text.strip()
                # JSONP unwrap: only strip when the whole body is a single
                # `identifier( ... )` call (e.g. jQuery123_456({...});).
                # The previous endswith(')') heuristic could corrupt plain
                # JSON bodies that merely contain parentheses.
                jsonp = re.fullmatch(r'[\w$.]+\s*\((.*)\)\s*;?', text, re.DOTALL)
                if jsonp:
                    text = jsonp.group(1)
                try:
                    return json.loads(text)
                except Exception:
                    try:
                        return resp.json()
                    except Exception as je:
                        # Keep a 1KB snippet for debugging.
                        snippet = text[:1024]
                        logger.debug(f"JSON解析失败，片段: {snippet}")
                        last_err = je
                        raise
            except Exception as e:
                last_err = e
                if attempt < max_retries:
                    time.sleep(delay)
                    delay *= 1.6
                    continue
                break
        logger.debug(f"_request_json 重试耗尽: {str(last_err)}")
        return None

    def _normalize_time(self, time_str: str) -> str:
        """Normalize a time string to '%Y-%m-%d %H:%M:%S'.

        Args:
            time_str: raw time string, possibly with surrounding noise.

        Returns:
            The normalized string, or '' when the input is empty or cannot
            be parsed (no current-time fallback: the caller decides).
        """
        if not time_str:
            # Keep empty rather than substituting "now" - upstream decides.
            return ''

        try:
            # Candidate formats, most specific first. Partial formats
            # (month-day / time-only) get the current year filled in below.
            time_patterns = [
                '%Y-%m-%d %H:%M:%S',
                '%Y-%m-%d %H:%M',
                '%Y-%m-%d',
                '%Y/%m/%d %H:%M:%S',
                '%Y/%m/%d %H:%M',
                '%Y/%m/%d',
                '%m-%d %H:%M',
                '%H:%M'
            ]

            # Keep only digits, '-', '/', ':' and whitespace. ('/' was
            # previously stripped, which made slash dates unparseable.)
            time_str = re.sub(r'[^\d\-/:\s]', '', time_str).strip()

            for pattern in time_patterns:
                try:
                    parsed_time = datetime.strptime(time_str, pattern)
                    # Formats without a year default to 1900; use this year.
                    if pattern in ['%m-%d %H:%M', '%H:%M']:
                        parsed_time = parsed_time.replace(year=datetime.now().year)
                    return parsed_time.strftime('%Y-%m-%d %H:%M:%S')
                except ValueError:
                    continue

            # No candidate format matched.
            return ''

        except Exception as e:
            logger.error(f"时间格式化失败: {e}")
            return ''

    def _format_news_data(self, raw_data: List[Dict]) -> List[Dict]:
        """Normalize raw items into the canonical news schema.

        Non-dict entries are skipped; items whose title is missing or no
        longer than 5 characters are filtered out.

        Args:
            raw_data: raw news records.

        Returns:
            Formatted news records.
        """
        formatted_news = []

        for item in raw_data:
            if not isinstance(item, dict):
                continue

            # `or` fallbacks also cover keys that exist with value None,
            # which would otherwise crash on .strip().
            formatted_item = {
                'id': item.get('id', ''),
                'title': (item.get('title') or '').strip(),
                'summary': (item.get('summary') or item.get('content') or '').strip(),
                'url': item.get('url') or item.get('link') or '',
                'source': item.get('source') or item.get('author') or '未知来源',
                'publish_time': item.get('publish_time') or item.get('time') or datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'category': item.get('category') or item.get('tag') or '',
                'keywords': item.get('keywords', []),
                'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }

            # Drop items without a meaningful title.
            if formatted_item['title'] and len(formatted_item['title']) > 5:
                formatted_news.append(formatted_item)

        return formatted_news