# -*- coding: utf-8 -*-
# Author   : ZhangQing
# Time     : 2025-07-08 6:43
# File     : data_process.py
# Project  : risk-contagion-analysis
# Desc     :

import pandas as pd
import requests
import re
from sqlalchemy import create_engine
from concurrent.futures import ThreadPoolExecutor
import logging


class DataCollector:
    """Collects, cleans, and stores data from CSMAR, Wind, and MIIT sources."""

    def __init__(self, config):
        """Initialize the data collector.

        Args:
            config: dict-like configuration; must contain 'database_uri',
                'csmar_api_base', and 'csmar_api_key'.
        """
        self.config = config
        self.logger = self._setup_logger()
        self.db_engine = create_engine(config['database_uri'])

    def _setup_logger(self):
        """Create (or reuse) the shared 'DataCollector' stream logger.

        Returns:
            logging.Logger: logger emitting INFO-level records to stderr.
        """
        logger = logging.getLogger("DataCollector")
        logger.setLevel(logging.INFO)
        # Guard against attaching a duplicate handler every time a new
        # DataCollector is constructed (which would double every log line).
        if not logger.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
            logger.addHandler(handler)
        return logger

    def fetch_csmar_data(self, start_year=2003, end_year=2022):
        """Fetch annual-report MD&A texts from the CSMAR database.

        Args:
            start_year: first year to fetch (inclusive).
            end_year: last year to fetch (inclusive).

        Returns:
            DataFrame: records of company code / year / MD&A text.
            Years that fail are logged and skipped, not fatal.
        """
        self.logger.info(f"开始获取CSMAR数据 ({start_year}-{end_year})...")

        # A real implementation would use CSMAR's official API or SDK;
        # demonstrated here against a generic REST endpoint.
        csmar_data = []
        for year in range(start_year, end_year + 1):
            try:
                api_url = f"{self.config['csmar_api_base']}/corptext?year={year}"
                response = requests.get(
                    api_url,
                    headers={"Authorization": f"Bearer {self.config['csmar_api_key']}"},
                    timeout=30,  # never hang indefinitely on a stalled server
                )

                if response.status_code == 200:
                    year_data = response.json()
                    csmar_data.extend(year_data)
                    self.logger.info(f"成功获取{year}年数据，记录数: {len(year_data)}")
                else:
                    self.logger.error(f"获取{year}年数据失败: {response.status_code}")
            except Exception as e:
                # One bad year must not abort the whole range.
                self.logger.error(f"处理{year}年数据时出错: {str(e)}")

        return pd.DataFrame(csmar_data)

    def fetch_wind_security_investment(self):
        """Fetch corporate cybersecurity-investment data from Wind.

        Returns:
            DataFrame: stock_code / year / security_investment records,
            or an empty DataFrame on any failure.
        """
        self.logger.info("开始获取Wind安全投资数据...")

        try:
            # WindPy is Wind's official Python API (third-party, optional).
            import WindPy as w
            w.start()
            try:
                # All A-share constituent codes as of 2022-12-31.
                stocks = w.wset("sectorconstituent",
                                "date=2022-12-31;sectorid=a001010100000000")
                codes = stocks.Data[1]

                security_data = []
                for code in codes:
                    # Field b13 = network-security investment, yearly series.
                    data = w.wsd(code, "b13", "2003-01-01", "2022-12-31", "")
                    if data.ErrorCode == 0:
                        for i, value in enumerate(data.Data[0]):
                            security_data.append({
                                "stock_code": code,
                                "year": 2003 + i,  # series starts at 2003
                                "security_investment": value,
                            })
            finally:
                # Always release the Wind session, even if a query raises.
                w.close()

            return pd.DataFrame(security_data)
        except Exception as e:
            self.logger.error(f"获取Wind数据时出错: {str(e)}")
            return pd.DataFrame()

    def crawl_miit_security_incidents(self):
        """Crawl MIIT information-security incident filing records.

        Returns:
            DataFrame: company / date / description records,
            or an empty DataFrame on failure.
        """
        self.logger.info("开始爬取工信部安全事件数据...")

        import time  # hoisted: was re-imported on every loop iteration

        # NOTE(review): a production crawler must also handle anti-bot
        # measures, IP rotation, and robots.txt compliance.
        try:
            base_url = "https://example-miit-site.gov.cn/security-incidents"
            incidents = []
            # Compile the row pattern once instead of on every page.
            row_pattern = re.compile(
                r'<tr>.*?<td>(.*?)</td>.*?<td>(.*?)</td>.*?<td>(.*?)</td>.*?</tr>',
                re.DOTALL)

            # Paginate until a non-200 response (assumed end of data).
            for page in range(1, 100):
                response = requests.get(
                    f"{base_url}?page={page}",
                    headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0...)"},
                    timeout=30,  # bound each page request
                )

                if response.status_code != 200:
                    break

                # A robust parser (e.g. BeautifulSoup) is preferable for
                # real HTML; regex is kept here for the simplified demo.
                for company, date, description in row_pattern.findall(response.text):
                    incidents.append({
                        "company": company.strip(),
                        "date": date.strip(),
                        "description": description.strip(),
                    })

                # Throttle to avoid hammering the server.
                time.sleep(2)

            return pd.DataFrame(incidents)
        except Exception as e:
            self.logger.error(f"爬取工信部数据时出错: {str(e)}")
            return pd.DataFrame()

    def clean_and_preprocess(self, df, data_type):
        """Clean and preprocess one of the raw datasets.

        Args:
            df: raw DataFrame from the matching fetch/crawl method.
            data_type: one of "csmar", "wind", "miit".

        Returns:
            DataFrame: cleaned data. Empty input or an unknown data_type
            is returned unchanged.
        """
        self.logger.info(f"开始清洗{data_type}数据...")

        if df.empty:
            return df

        # Each source needs its own cleaning pipeline.
        if data_type == "csmar":
            # Strip HTML tags; missing texts become empty strings.
            df['mda_text'] = df['mda_text'].apply(
                lambda x: re.sub(r'<.*?>', '', str(x)) if pd.notnull(x) else '')

            # Normalize page-number markers: "B_12" -> "Page 12".
            df['mda_text'] = df['mda_text'].apply(
                lambda x: re.sub(r'B_\d+', lambda m: m.group().replace('B_', 'Page '), x))

            # Collapse runs of whitespace.
            df['mda_text'] = df['mda_text'].apply(lambda x: re.sub(r'\s+', ' ', x).strip())

        elif data_type == "wind":
            # Treat missing investment as zero. Plain assignment instead of
            # inplace fillna on a column selection, which is the deprecated
            # chained-assignment pattern in modern pandas.
            df['security_investment'] = df['security_investment'].fillna(0)

            # Drop per-year outliers beyond 3 standard deviations. Groups
            # with a single row or zero variance yield NaN z-scores; fill
            # those with 0 so valid rows are kept instead of silently dropped.
            df['z_score'] = df.groupby('year')['security_investment'].transform(
                lambda x: (x - x.mean()) / x.std()).fillna(0.0)
            df = df[df['z_score'].abs() <= 3].drop('z_score', axis=1)

        elif data_type == "miit":
            # Normalize dates; unparseable values become NaT.
            df['date'] = pd.to_datetime(df['date'], errors='coerce')

            # Derive the filing year (NaN where the date failed to parse).
            df['year'] = df['date'].dt.year

            # Strip common company-name suffixes ("Co., Ltd." variants).
            df['company'] = df['company'].apply(
                lambda x: re.sub(r'(股份)?有限(责任)?公司$', '', x).strip())

        return df

    def save_to_database(self, df, table_name):
        """Persist a DataFrame to the configured database.

        Args:
            df: DataFrame to store; empty frames are skipped with a warning.
            table_name: destination table name.
        """
        if df.empty:
            self.logger.warning(f"没有数据可保存到{table_name}表")
            return

        try:
            self.logger.info(f"保存数据到{table_name}表，记录数: {len(df)}")
            df.to_sql(
                table_name,
                self.db_engine,
                if_exists='replace',  # switch to 'append' for incremental loads
                index=False,
                chunksize=1000,  # write in batches to bound memory use
            )
            self.logger.info(f"数据成功保存到{table_name}表")
        except Exception as e:
            self.logger.error(f"保存数据到{table_name}表时出错: {str(e)}")

    def schedule_updates(self, cron_expression="0 0 1 * *"):
        """Schedule recurring data updates (default: 00:00 on day 1, monthly).

        Args:
            cron_expression: standard 5-field cron expression defining the
                update frequency.
        """
        # Imported lazily so apscheduler is only required when scheduling.
        from apscheduler.schedulers.background import BackgroundScheduler
        from apscheduler.triggers.cron import CronTrigger

        scheduler = BackgroundScheduler()
        # Bug fix: cron_expression was previously ignored in favor of
        # hard-coded field values; build the trigger from the argument.
        # The default expression reproduces the old monthly schedule.
        scheduler.add_job(self.update_all_data,
                          CronTrigger.from_crontab(cron_expression))
        scheduler.start()
        self.logger.info(f"数据更新任务已设置: {cron_expression}")

    def update_all_data(self):
        """Refresh all data sources (CSMAR limited to the current year)."""
        self.logger.info("开始全量数据更新...")

        import datetime
        current_year = datetime.datetime.now().year

        # CSMAR: only the current year needs refreshing.
        csmar_data = self.fetch_csmar_data(current_year, current_year)
        csmar_data = self.clean_and_preprocess(csmar_data, "csmar")
        self.save_to_database(csmar_data, "csmar_mda_texts")

        # Wind and MIIT sources are refreshed in full.
        wind_data = self.fetch_wind_security_investment()
        wind_data = self.clean_and_preprocess(wind_data, "wind")
        self.save_to_database(wind_data, "wind_security_investment")

        miit_data = self.crawl_miit_security_incidents()
        miit_data = self.clean_and_preprocess(miit_data, "miit")
        self.save_to_database(miit_data, "miit_security_incidents")

        self.logger.info("全量数据更新完成")

    def run_data_collection(self):
        """Run the full pipeline: fetch in parallel, clean, store, schedule.

        Returns:
            dict: cleaned DataFrames keyed by "csmar", "wind", "miit".
        """
        self.logger.info("开始完整数据采集流程...")

        # The three sources are independent I/O-bound tasks, so fetch
        # them concurrently.
        with ThreadPoolExecutor(max_workers=3) as executor:
            csmar_future = executor.submit(self.fetch_csmar_data)
            wind_future = executor.submit(self.fetch_wind_security_investment)
            miit_future = executor.submit(self.crawl_miit_security_incidents)

            csmar_data = csmar_future.result()
            wind_data = wind_future.result()
            miit_data = miit_future.result()

        # Clean each dataset with its source-specific pipeline.
        csmar_data = self.clean_and_preprocess(csmar_data, "csmar")
        wind_data = self.clean_and_preprocess(wind_data, "wind")
        miit_data = self.clean_and_preprocess(miit_data, "miit")

        # Persist to the database.
        self.save_to_database(csmar_data, "csmar_mda_texts")
        self.save_to_database(wind_data, "wind_security_investment")
        self.save_to_database(miit_data, "miit_security_incidents")

        # Arrange periodic refreshes going forward.
        self.schedule_updates()

        self.logger.info("数据采集流程完成")

        # Return the frames for downstream analysis.
        return {
            "csmar": csmar_data,
            "wind": wind_data,
            "miit": miit_data
        }
