"""
新闻和基本面数据采集器模块

负责通过akshare API获取经济新闻和基本面数据，进行清洗，并保存到数据库
"""
import time
import datetime
import logging
import akshare as ak
import pandas as pd
from sqlalchemy.exc import IntegrityError

from modules.models.base import Base, get_db
from modules.models.stock import SecurityInfo
from modules.models.news import EconomicNews, StockNotice
from modules.models.macro import MacroEconomicData, EconomicCalendar
from config.settings import DATA_COLLECTION

# Module-level logger configuration
logger = logging.getLogger("news_fundamental_collector")


class NewsFundamentalCollector:
    """Collector for economic news and fundamental/macro data.

    All data is fetched through the akshare API, normalized into the
    project's ORM schemas (EconomicNews / MacroEconomicData), and written
    to the database in batches of ``batch_size`` rows. Rows already present
    in the database — or duplicated within the same incoming frame — are
    skipped.
    """

    def __init__(self):
        """Load retry/batch settings from config and open a DB session."""
        self.retry_limit = DATA_COLLECTION["retry_limit"]
        self.retry_delay = DATA_COLLECTION["retry_delay"]
        self.batch_size = DATA_COLLECTION["batch_size"]
        # get_db() yields sessions; take one for this collector's lifetime.
        self.db = next(get_db())

    def __del__(self):
        """Destructor: make sure the database session is closed."""
        if hasattr(self, 'db') and self.db:
            self.db.close()

    #---------------- Economic news collection ----------------#

    def collect_financial_news(self, source="sina", limit=50):
        """
        Collect financial news from a supported provider.

        Args:
            source: provider key, one of "sina", "eastmoney", "10jqka"
            limit: maximum number of items to keep (<= 0 means unlimited)

        Returns:
            Number of news rows saved (0 for an unsupported source).

        Raises:
            Exception: any akshare/database error is logged and re-raised.
        """
        logger.info(f"开始采集{source}财经新闻，数量限制: {limit}")

        try:
            # Dispatch to the provider-specific akshare endpoint.
            if source == "sina":
                df = ak.stock_news_sina()
            elif source == "eastmoney":
                df = ak.stock_news_em()
            elif source == "10jqka":
                df = ak.stock_news_10jqka()
            else:
                logger.error(f"不支持的新闻来源: {source}")
                return 0

            # Truncate to the requested number of rows.
            if limit > 0 and len(df) > limit:
                df = df.head(limit)

            # Normalize, then persist.
            df = self._clean_news_data(df, source)
            saved_count = self._save_news(df)

            logger.info(f"成功保存{saved_count}条{source}财经新闻")
            return saved_count

        except Exception as e:
            logger.error(f"采集{source}财经新闻失败: {str(e)}")
            raise

    def collect_stock_notice(self, stock_code, start_date=None, end_date=None):
        """
        Collect announcements for a single stock.

        Args:
            stock_code: stock symbol understood by akshare
            start_date: inclusive start date, format YYYYMMDD (optional)
            end_date: inclusive end date, format YYYYMMDD (optional)

        Returns:
            Number of notice rows saved.

        Raises:
            Exception: any akshare/database error is logged and re-raised.
        """
        logger.info(f"开始采集股票 {stock_code} 的公告")

        try:
            notice_df = ak.stock_notice_report(symbol=stock_code)

            # Optional date-range filtering on the announcement date column.
            if start_date or end_date:
                notice_df["日期"] = pd.to_datetime(notice_df["日期"])

                if start_date:
                    start_date = pd.to_datetime(start_date)
                    notice_df = notice_df[notice_df["日期"] >= start_date]

                if end_date:
                    end_date = pd.to_datetime(end_date)
                    notice_df = notice_df[notice_df["日期"] <= end_date]

            notice_df = self._clean_news_data(notice_df, "notice")
            saved_count = self._save_news(notice_df)

            logger.info(f"成功保存{saved_count}条股票 {stock_code} 公告")
            return saved_count

        except Exception as e:
            logger.error(f"采集股票 {stock_code} 公告失败: {str(e)}")
            raise

    def _clean_news_data(self, df, source):
        """Normalize a raw news/notice DataFrame to the EconomicNews schema.

        Returns a DataFrame (sharing df's index) with columns: title,
        publish_time, url, source, category — plus content for notices.
        """
        if df.empty:
            return df

        # Lower-case ASCII column names; Chinese headers are unaffected.
        df.columns = [col.lower() for col in df.columns]

        # BUGFIX: build the result on df's index. Assigning a scalar to a
        # brand-new empty DataFrame freezes its index at length 0, after
        # which aligned Series assignments silently yield an empty frame.
        result_df = pd.DataFrame(index=df.index)

        if source == "sina":
            # Sina finance news.
            result_df["title"] = df.get("标题") if "标题" in df.columns else df.get("title", "")
            result_df["publish_time"] = pd.to_datetime(df.get("发布时间") if "发布时间" in df.columns else df.get("publish_time", ""))
            result_df["url"] = df.get("链接") if "链接" in df.columns else df.get("url", "")
            result_df["source"] = "sina"
            result_df["category"] = "财经新闻"

        elif source == "eastmoney":
            # Eastmoney news.
            result_df["title"] = df.get("标题") if "标题" in df.columns else df.get("title", "")
            result_df["publish_time"] = pd.to_datetime(df.get("发布时间") if "发布时间" in df.columns else df.get("datetime", ""))
            result_df["url"] = df.get("链接") if "链接" in df.columns else df.get("url", "")
            result_df["source"] = "eastmoney"
            result_df["category"] = "财经新闻"

        elif source == "notice":
            # Stock announcements.
            result_df["title"] = df.get("标题") if "标题" in df.columns else df.get("title", "")
            result_df["publish_time"] = pd.to_datetime(df.get("日期") if "日期" in df.columns else df.get("date", ""))
            result_df["url"] = df.get("链接") if "链接" in df.columns else df.get("url", "")
            result_df["source"] = "stock_notice"
            result_df["category"] = "公司公告"
            result_df["content"] = df.get("正文") if "正文" in df.columns else None

        else:
            # Unknown provider: best-effort generic column mapping.
            result_df["title"] = df.get("title", df.get("标题", ""))
            result_df["publish_time"] = pd.to_datetime(df.get("publish_time", df.get("发布时间", df.get("date", df.get("日期", "")))))
            result_df["url"] = df.get("url", df.get("链接", ""))
            result_df["content"] = df.get("content", df.get("内容", df.get("正文", None)))
            result_df["source"] = source
            result_df["category"] = "财经新闻"

        return result_df

    def _save_news(self, df):
        """Persist news rows, skipping duplicates (by title or URL).

        Deduplicates against the database AND within the incoming frame.
        Commits in batches of ``batch_size``; rolls back and re-raises on
        failure. Returns the number of rows queued for insertion.
        """
        if df.empty:
            return 0

        saved_count = 0

        try:
            # Snapshot existing titles/URLs once for O(1) duplicate checks.
            existing_titles = {
                title[0] for title in
                self.db.query(EconomicNews.title).all()
            }
            existing_urls = {
                url[0] for url in
                self.db.query(EconomicNews.url).filter(EconomicNews.url != None).all()
            }

            batch_records = []

            for _, row in df.iterrows():
                title = row.get('title')
                url = row.get('url')

                # Skip rows without a usable title (nothing to dedupe on).
                if not title or pd.isna(title):
                    continue

                # Skip records already in the DB or already queued below.
                if title in existing_titles or (url and url in existing_urls):
                    continue

                news = EconomicNews(
                    title=title,
                    content=row.get('content'),
                    publish_time=row.get('publish_time'),
                    source=row.get('source'),
                    category=row.get('category'),
                    url=url,
                    keywords=row.get('keywords')
                )

                batch_records.append(news)
                saved_count += 1

                # BUGFIX: remember this row so a duplicate later in the
                # same frame is not inserted twice.
                existing_titles.add(title)
                if url:
                    existing_urls.add(url)

                # Commit in batches to bound transaction size.
                if len(batch_records) >= self.batch_size:
                    self.db.add_all(batch_records)
                    self.db.commit()
                    batch_records = []

            # Commit the remainder.
            if batch_records:
                self.db.add_all(batch_records)
                self.db.commit()

            return saved_count

        except Exception as e:
            self.db.rollback()
            logger.error(f"保存新闻数据失败: {str(e)}")
            raise

    #---------------- Macro-economic data collection ----------------#

    def _fetch_country_indicator(self, region, fetcher):
        """Resolve *region* through akshare's global country table and fetch
        its series with ``fetcher(url)``; returns None for an unknown region.
        """
        countries = ak.macro_get_global_country_name_url()
        countries = countries[countries["中文名称"] == region]
        if countries.empty:
            return None
        return fetcher(countries.iloc[0]["url"])

    def collect_macro_data(self, indicator_name, region="中国"):
        """
        Collect macro-economic data for one indicator and region.

        Args:
            indicator_name: indicator key, e.g. "gdp", "cpi", "ppi",
                "unemployment" (case-insensitive)
            region: region name, defaults to "中国"

        Returns:
            Number of data rows saved (0 for unsupported indicator/region).

        Raises:
            Exception: any akshare/database error is logged and re-raised.
        """
        logger.info(f"开始采集{region}{indicator_name}宏观经济数据")

        try:
            name = indicator_name.lower()

            # Dispatch to the indicator- and region-specific endpoint,
            # recording the matching metadata (indicator, period, unit).
            if name == "gdp":
                if region == "中国":
                    df = ak.macro_china_gdp()
                    indicator, period, unit = "GDP", "季度", "亿元"
                else:
                    df = self._fetch_country_indicator(region, ak.macro_country_gdp_yearly)
                    if df is None:
                        logger.error(f"不支持的地区: {region}")
                        return 0
                    indicator, period, unit = "GDP", "年度", "美元"

            elif name == "cpi":
                if region == "中国":
                    df = ak.macro_china_cpi_yearly()
                    indicator, period, unit = "CPI", "年度", "%"
                else:
                    df = self._fetch_country_indicator(region, ak.macro_country_cpi_yearly)
                    if df is None:
                        logger.error(f"不支持的地区: {region}")
                        return 0
                    indicator, period, unit = "CPI", "年度", "%"

            elif name == "ppi":
                if region == "中国":
                    df = ak.macro_china_ppi_yearly()
                    indicator, period, unit = "PPI", "年度", "%"
                else:
                    # Only the domestic PPI series is available upstream.
                    logger.error(f"不支持的地区: {region}")
                    return 0

            elif name == "unemployment":
                if region == "中国":
                    df = ak.macro_china_urban_unemployment()
                    indicator, period, unit = "城镇调查失业率", "月度", "%"
                else:
                    df = self._fetch_country_indicator(region, ak.macro_country_unemployment_yearly)
                    if df is None:
                        logger.error(f"不支持的地区: {region}")
                        return 0
                    indicator, period, unit = "失业率", "年度", "%"

            else:
                logger.error(f"不支持的指标: {indicator_name}")
                return 0

            # Normalize, then persist.
            df = self._clean_macro_data(df, indicator, region, period, unit)
            saved_count = self._save_macro_data(df, indicator, region)

            logger.info(f"成功保存{saved_count}条{region}{indicator}宏观经济数据")
            return saved_count

        except Exception as e:
            logger.error(f"采集{region}{indicator_name}宏观经济数据失败: {str(e)}")
            raise

    def _clean_macro_data(self, df, indicator, region, period, unit):
        """Normalize a raw macro DataFrame to the MacroEconomicData schema.

        Returns a DataFrame with time_point/value columns plus constant
        metadata columns (indicator, region, period, unit); an empty
        DataFrame when the source layout cannot be recognized.
        """
        if df.empty:
            return df

        # Lower-case ASCII column names; Chinese headers are unaffected.
        df.columns = [col.lower() for col in df.columns]

        # BUGFIX: the result must share df's index BEFORE the scalar
        # metadata columns are assigned. Previously the scalars were written
        # to a fresh empty DataFrame, freezing its index at length 0, so
        # every subsequent aligned Series assignment yielded 0 rows and
        # nothing was ever saved.
        result_df = pd.DataFrame(index=df.index)
        result_df["indicator"] = indicator
        result_df["region"] = region
        result_df["period"] = period
        result_df["unit"] = unit

        # Map the provider-specific layout onto (time_point, value).
        if "年份" in df.columns and "gdp" in df.columns:
            # Yearly GDP data.
            result_df["time_point"] = pd.to_datetime(df["年份"], format="%Y")
            result_df["value"] = pd.to_numeric(df["gdp"], errors="coerce")

        elif "年份" in df.columns and "cpi" in df.columns:
            # Yearly CPI data.
            result_df["time_point"] = pd.to_datetime(df["年份"], format="%Y")
            result_df["value"] = pd.to_numeric(df["cpi"], errors="coerce")

        elif "年份" in df.columns and "ppi" in df.columns:
            # Yearly PPI data.
            result_df["time_point"] = pd.to_datetime(df["年份"], format="%Y")
            result_df["value"] = pd.to_numeric(df["ppi"], errors="coerce")

        elif "季度" in df.columns and "gdp" in df.columns:
            # Quarterly GDP data.
            result_df["time_point"] = pd.to_datetime(df["季度"])
            result_df["value"] = pd.to_numeric(df["gdp"], errors="coerce")

        elif "月份" in df.columns and "失业率" in df.columns:
            # Monthly unemployment data.
            result_df["time_point"] = pd.to_datetime(df["月份"])
            result_df["value"] = pd.to_numeric(df["失业率"], errors="coerce")

        else:
            # Fallback: guess the date column, take the first other column
            # as the value.
            date_cols = [col for col in df.columns if any(x in col.lower() for x in ["年份", "日期", "月份", "季度", "time", "date", "year"])]
            value_cols = [col for col in df.columns if col not in date_cols]

            if date_cols and value_cols:
                result_df["time_point"] = pd.to_datetime(df[date_cols[0]])
                result_df["value"] = pd.to_numeric(df[value_cols[0]], errors="coerce")
            else:
                logger.warning(f"无法识别的数据格式: {df.columns}")
                return pd.DataFrame()  # unrecognized layout: nothing usable

        return result_df

    def _save_macro_data(self, df, indicator, region):
        """Persist macro rows, skipping (indicator, region, date) duplicates.

        Deduplicates against the database AND within the incoming frame;
        rows whose time_point failed to parse (NaT) are dropped. Commits in
        batches of ``batch_size``; rolls back and re-raises on failure.
        """
        if df.empty:
            return 0

        saved_count = 0

        try:
            # Existing (indicator, region, date) keys for this series.
            existing_records = {
                (r.indicator, r.region, r.time_point.strftime('%Y-%m-%d'))
                for r in self.db.query(MacroEconomicData).filter(
                    MacroEconomicData.indicator == indicator,
                    MacroEconomicData.region == region
                ).all()
            }

            batch_records = []

            for _, row in df.iterrows():
                # BUGFIX: NaT has no usable .date(); drop unparsable rows.
                if pd.isna(row["time_point"]):
                    continue

                time_point = row["time_point"].date()
                key = (indicator, region, time_point.strftime('%Y-%m-%d'))

                # Skip records already in the DB or already queued below.
                if key in existing_records:
                    continue

                macro_data = MacroEconomicData(
                    indicator=row["indicator"],
                    region=row["region"],
                    period=row["period"],
                    time_point=time_point,
                    value=row["value"],
                    unit=row["unit"],
                    description=None  # no description available upstream
                )

                batch_records.append(macro_data)
                saved_count += 1
                # BUGFIX: prevent duplicates within the same frame.
                existing_records.add(key)

                # Commit in batches to bound transaction size.
                if len(batch_records) >= self.batch_size:
                    self.db.add_all(batch_records)
                    self.db.commit()
                    batch_records = []

            # Commit the remainder.
            if batch_records:
                self.db.add_all(batch_records)
                self.db.commit()

            return saved_count

        except Exception as e:
            self.db.rollback()
            logger.error(f"保存宏观经济数据失败: {str(e)}")
            raise

# 测试函数
def test_news_fundamental_collector():
    """Exercise the collector end to end: financial news, then macro data."""
    collector = NewsFundamentalCollector()

    # Each step: (callable, success template, failure template).
    steps = [
        (lambda: collector.collect_financial_news(source="sina", limit=10),
         "成功采集 {} 条新浪财经新闻",
         "采集财经新闻失败: {}"),
        (lambda: collector.collect_macro_data(indicator_name="gdp", region="中国"),
         "成功采集 {} 条中国GDP数据",
         "采集宏观经济数据失败: {}"),
    ]

    for action, ok_msg, err_msg in steps:
        try:
            print(ok_msg.format(action()))
        except Exception as e:
            print(err_msg.format(str(e)))


if __name__ == "__main__":
    # Basic console logging for ad-hoc runs.
    LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)

    # Run the smoke test.
    test_news_fundamental_collector()