import pandas as pd
import numpy as np
import pymysql
from database.database_manager import DatabaseManager
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import math

def save_data():
    """Extract consecutive limit-up day pairs per stock and export to result_data.xlsx.

    For every stock, keeps only limit-up rows (daily change above the board's
    threshold), then records the gap in days, the price amplitude and a turnover
    ratio between each pair of consecutive limit-up days.

    Side effects: opens a MySQL connection, writes ``result_data.xlsx``.
    """
    database = 'mydatabase'

    if database == 'manhattan_db':
        # NOTE(review): credentials are hard-coded — move to config/env.
        connection = pymysql.connect(host='47.94.239.94', port=3306, user='manhattan',
                                     db=database, password='123456')
        try:
            data = pd.read_sql("SELECT * FROM stock_daily_info LIMIT 100000", connection)
        finally:
            connection.close()  # was leaked before

        # Keep only limit-up days (>9.9% change); copy so the column
        # assignments below don't hit SettingWithCopyWarning.
        filtered_data = data[data["change"] > 9.9].copy()
        filtered_data["trade_date"] = pd.to_datetime(filtered_data["trade_date"])

        # Group per stock and compute interval days, amplitude and turnover
        # between consecutive limit-up days.  Rows are collected in a plain
        # list: DataFrame.append was removed in pandas 2.0 and was O(n^2).
        rows = []
        for stock_code, group in filtered_data.groupby("ts_code"):
            group = group.copy()
            group["interval_days"] = group["trade_date"].diff().dt.days
            group["amplitude"] = group["high"].diff()
            # TODO: replace with a cumulative turnover rate (per original note).
            group["turnover_rate"] = group["turn_over"] / group["turn_over"].shift(1)

            for _, row in group.iterrows():
                # First row of each group has a NaN diff — nothing to pair with.
                if pd.isna(row["interval_days"]):
                    continue
                rows.append({
                    "stock_code": stock_code,
                    # Was row["time_diff"] (a Timedelta); use the integer day count.
                    "interval_days": row["interval_days"],
                    "amplitude": abs(row["amplitude"]),
                    "turnover_rate": row["turnover_rate"],
                })

        result_data = pd.DataFrame(
            rows, columns=["stock_code", "interval_days", "amplitude", "turnover_rate"])
        result_data.to_excel("result_data.xlsx", index=False)

    if database == 'mydatabase':
        connection = pymysql.connect(host='127.0.0.1', user='root', passwd='wla',
                                     db=database, charset='utf8')
        try:
            data = pd.read_sql("SELECT * FROM stock_daily LIMIT 100000", connection)
        finally:
            connection.close()  # was leaked before

        # ChiNext/STAR boards ('30'/'688' prefixes) have a ~20% limit,
        # the main board ~10%.
        is_20pct_board = data['stock_code'].str.startswith(('30', '688'))
        filtered_data = data[(is_20pct_board & (data["change_val"] > 19.9)) |
                             (~is_20pct_board & (data["change_val"] > 9.9))].copy()
        filtered_data["state_dt"] = pd.to_datetime(filtered_data["state_dt"])

        rows = []
        for stock_code, group in filtered_data.groupby("stock_code"):
            group = group.copy()
            group["interval_days"] = group["state_dt"].diff().dt.days
            # (Removed a dead duplicate amplitude assignment that was
            # immediately overwritten.)
            group["amplitude"] = group["high"].diff()
            # TODO: replace with a cumulative turnover rate (per original note).
            group["turnover_rate"] = group["vol"] / group["vol"].shift(1)

            last_state_dt = None
            last_high = None
            for _, row in group.iterrows():
                if pd.isna(row["interval_days"]):
                    # First limit-up of this stock: seed the trackers so the
                    # pair (first, second) is recorded — it was dropped before.
                    last_state_dt = row["state_dt"]
                    last_high = row["high"]
                    continue
                if last_state_dt is not None:
                    rows.append({
                        "stock_code": stock_code,
                        # Was row["time_diff"] (a Timedelta); use the day count.
                        "interval_days": row["interval_days"],
                        "first": last_state_dt,
                        "second": row["state_dt"],
                        # Round AFTER scaling to percent so two decimals survive
                        # (previously round(..., 2) * 100 discarded precision).
                        "amplitude": round(abs(row["amplitude"]) / last_high * 100, 2),
                        "turnover_rate": row["turnover_rate"],
                    })
                last_state_dt = row["state_dt"]
                last_high = row["high"]

        result_data = pd.DataFrame(
            rows,
            columns=["stock_code", "interval_days", "first", "second",
                     "amplitude", "turnover_rate"])
        result_data.to_excel("result_data.xlsx", index=False)

def analyze():
    """Plot the distribution of interval days from result_data.xlsx.

    Reads the spreadsheet produced by :func:`save_data` and shows a histogram
    (with KDE overlay) of the gaps between consecutive limit-up days.

    Side effects: reads ``result_data.xlsx``, opens a matplotlib window.
    """
    excel_file = "result_data.xlsx"
    # Only interval_days is plotted; the previous version also loaded the
    # unused "amplitude" column.
    data = pd.read_excel(excel_file, usecols=["interval_days"], engine="openpyxl")

    plt.figure(figsize=(10, 6))
    # Kept at cell 1 of a 1x2 grid to preserve the original layout;
    # presumably a second panel was planned — TODO confirm.
    plt.subplot(1, 2, 1)
    sns.histplot(data=data, x="interval_days", bins=60, kde=True)
    plt.title("Distribution of Interval Days")
    plt.xlabel("Interval Days")
    plt.ylabel("Frequency")
    plt.show()

'''
Reduce the model's generalization bias across data of different scales.
sample: statistics over a chosen window, with min/max bounds for each field
        (min_interval_days, max_interval_days, min_amplitude, max_amplitude)
interval_days: the interval days of one stock
amplitude: the corresponding amplitude within that window
w1: weight for the interval days term
w2: weight for the amplitude term
'''
def scale_data(sample: dict, interval_days: int, amplitude: float, w1: float, w2: float) -> dict:
    """Score an (interval_days, amplitude) pair against a sample's min/max bounds.

    Three scores are produced: a linear min-max score, a log-scaled variant of
    the same score, and a bucket-mapping score from hand-tuned weight tables.
    Longer intervals and smaller amplitudes score higher.

    Args:
        sample: dict with keys ``min_interval_days``, ``max_interval_days``,
            ``min_amplitude``, ``max_amplitude``.
        interval_days: interval days for one stock (must be >= 1 to fall in a bucket).
        amplitude: amplitude in the same window (must be >= 0 to fall in a bucket).
        w1: weight for the interval-days term.
        w2: weight for the amplitude term.

    Returns:
        dict with ``linear_score``, ``log_score`` and ``mapping_score``.
        (The original computed all three and returned None — they were lost.)

    Raises:
        StopIteration: if a value falls outside every bucket (e.g. negative).
        ZeroDivisionError: if a sample's min equals its max.
    """
    day_span = sample['max_interval_days'] - sample['min_interval_days']
    amp_span = sample['max_amplitude'] - sample['min_amplitude']

    # Normalize both inputs to [0, 1]; amplitude is inverted (smaller is better).
    day_norm = (interval_days - sample['min_interval_days']) / day_span
    amp_term = 1 - (amplitude - sample['min_amplitude']) / amp_span

    linear_score = w1 * day_norm + w2 * amp_term

    # Log-scale the days term: log(x + 1 - min) / log(max - min + 1) maps
    # [min, max] onto [0, 1] with diminishing returns for long intervals.
    log_days = math.log(interval_days + 1 - sample['min_interval_days']) / math.log(day_span + 1)
    log_score = w1 * log_days + w2 * amp_term

    # Bucket tables: (inclusive low, inclusive high) -> weight.
    interval_weights = {
        (1, 10): 0.5,
        (11, 30): 0.8,
        (31, float('inf')): 1.0,
    }
    amplitude_weights = {
        (0, 5): 0.8,
        (5, 10): 0.6,
        (10, float('inf')): 0.4,
    }

    # Look up the bucket weight for each input value.
    interval_weight = next(
        weight for (low, high), weight in interval_weights.items() if low <= interval_days <= high)
    amplitude_weight = next(
        weight for (low, high), weight in amplitude_weights.items() if low <= amplitude <= high)

    mapping_score = (interval_weight + amplitude_weight) / 2

    return {
        "linear_score": linear_score,
        "log_score": log_score,
        "mapping_score": mapping_score,
    }


def log_scale(x, a, b):
    """Map x from [a, b] onto [0, 1] on a logarithmic scale.

    Returns 0 at x == a and 1 at x == b, with diminishing growth in between.
    """
    numerator = math.log(x + 1 - a)
    denominator = math.log(b - a + 1)
    return numerator / denominator


if __name__ == "__main__":
    # Entry point: pull limit-up rows from the database and export
    # result_data.xlsx (analyze() must be run separately afterwards).
    save_data()
