import datetime

from scipy.ndimage import label
from sdv.single_table import GaussianCopulaSynthesizer



from tool_utils import *





def single_table_import_page(st):
    """Render the "quick data import" page: choose a source, configure the
    tables to synthesize, and generate data into CSV or a target database.

    Args:
        st: the streamlit module (passed in by the page router).
    """
    st.title("快速导入数据")
    sqlite_tool = get_sql_lite_tool()

    # Load registered data sources and append the virtual CSV entry.
    data_sources = sqlite_tool.get_table_data('database_connections', limit=-1).get('data')
    append_csv_data_source(data_sources)

    # ------------------------- source / target selection -------------------------
    selected_source, target_source, write_mode = page_source_target(data_sources, st)

    if selected_source:
        # Guarded lookup: a plain next() would raise StopIteration if the
        # selected name is no longer present in data_sources.
        source = next((s for s in data_sources if s["name"] == selected_source), None)
        if source is None:
            st.error(f"连接失败: {selected_source}")
            return
        try:
            if source["type"] in ["MySQL", "PostgreSQL", 'SQLSERVER']:
                # ------------------------- batch parameter configuration -------------------------
                st.subheader("2. 批量配置参数")
                col_batch1, col_batch2 = st.columns([2, 2])
                default_generate_rows = col_batch1.number_input("生成行数", min_value=1, value=1000)
                with col_batch2:
                    # Blank writes act as vertical spacers so the button
                    # lines up with the number input on the left.
                    st.write(" ")
                    st.write(" ")
                    if st.button(label="确认"):
                        # Push the batch default into every per-table config.
                        for table_config in st.session_state.table_configs.values():
                            table_config["gen_rows"] = default_generate_rows
                sql_tool = get_sql_databaseTool(source['id'])

                all_tables = sql_tool.get_table_names()
                table_config_page(all_tables, default_generate_rows, sql_tool, st)

                # ------------------------- generation target -------------------------
                if target_source == 'CSV':
                    if st.button("🚀 开始生成", use_container_width=True):
                        down_csv_file(source, st)
                else:
                    if st.button("🚀 开始生成", use_container_width=True):
                        import_database(data_sources, source, st, target_source, write_mode)
        except Exception as e:
            st.error(f"连接失败: {str(e)}")


def down_csv_file(source, st):
    """Generate synthetic data for every checked table and offer the result
    as a single multi-sheet Excel download.

    Args:
        source: source data-source record (dict with 'id', 'schema_name', ...).
        st: the streamlit module.
    """
    selected_tables = [t for t, cfg in st.session_state.table_configs.items() if cfg["checked"]]
    if not selected_tables:
        # Guard: openpyxl cannot save a workbook with zero sheets.
        st.error("表 [] 处理失败: 未勾选任何表")
        return

    df_dict = {}
    progress_bar = st.progress(0)
    total = len(selected_tables)
    for idx, table in enumerate(selected_tables):
        try:
            cfg = st.session_state.table_configs[table]
            # Train an SDV model on the source table and sample new rows.
            synthetic_data = sdv_create_data_from_database(source, table, st)
            df_dict[cfg["target_table"]] = synthetic_data

            progress_bar.progress((idx + 1) / total)
            st.success(f"表 [{table}] → [{cfg['target_table']}] 生成成功！")
        except Exception as e:
            # Keep going: one failing table should not abort the whole batch.
            st.error(f"表 [{table}] 处理失败: {str(e)}")

    # One worksheet per generated table, keyed by the target table name.
    output = BytesIO()
    with pd.ExcelWriter(output, engine='openpyxl') as writer:
        for sheet_name, frame in df_dict.items():
            frame.to_excel(writer, sheet_name=sheet_name, index=False)
    st.download_button(
        label="下载Excel",
        data=output.getvalue(),
        file_name=f'gen_data_{datetime.datetime.now().strftime("%Y%m%d%H%M%S")}.xlsx',
        mime='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    st.balloons()



def import_database(data_sources, source, st, target_source, write_mode):
    """Generate synthetic data for every checked table and write it into the
    target database.

    Args:
        data_sources: list of registered data-source records.
        source: the source data-source record.
        st: the streamlit module.
        target_source: name of the target data source.
        write_mode: "覆盖" (truncate then insert) or "追加" (append only).
    """
    # Guarded lookup: a plain next() would raise StopIteration on a missing name.
    target_config = next((s for s in data_sources if s["name"] == target_source), None)
    if target_config is None:
        st.error(f"表 [] 处理失败: {target_source}")
        return

    # The target connection is loop-invariant — build it once, not per table.
    target_tool = get_sql_databaseTool(target_config['id'])
    target_engine = target_tool.engine

    progress_bar = st.progress(0)
    selected_tables = [t for t, cfg in st.session_state.table_configs.items() if cfg["checked"]]
    total = len(selected_tables)
    for idx, table in enumerate(selected_tables):
        try:
            cfg = st.session_state.table_configs[table]
            source_table = get_table_metadata(source, table)

            # Train an SDV model on the source table and sample new rows.
            synthetic_data = sdv_create_data_from_database(source, table, st)
            if has_auto_increment_pk(source_table):
                # Drop generated PK values so the database assigns them itself.
                synthetic_data = synthetic_data.drop(columns=source_table.primary_key.columns.keys())

            target_table_name = cfg["target_table"]
            create_table_if_not_exist(source['id'], source_table.name, target_config['id'], target_table_name)
            if write_mode == "覆盖":
                # Overwrite mode: empty the table but keep its definition.
                target_tool.truncate_table(target_table_name)

            # Always append so the existing table is never dropped/recreated
            # (overwrite mode has already truncated above).
            synthetic_data.to_sql(
                name=target_table_name,
                con=target_engine,
                if_exists="append",
                index=False,
                schema=target_config['schema_name']
            )

            progress_bar.progress((idx + 1) / total)
            st.success(f"表 [{table}] → [{cfg['target_table']}] 生成成功！")
        except Exception as e:
            # Keep going: one failing table should not abort the whole batch.
            st.error(f"表 [{table}] 处理失败: {str(e)}")
    st.balloons()


def table_config_page(all_tables, default_generate_rows, sql_tool, st):
    """Render the per-table configuration grid (checkbox, row count, target
    table name, rows to generate) and persist choices in session_state.

    Args:
        all_tables: iterable of source table names.
        default_generate_rows: default number of rows to generate per table.
        sql_tool: database tool for the selected source (.count, .db_config).
        st: the streamlit module.
    """
    # Needed before the buttons below so they can address the widget keys.
    database_id = sql_tool.db_config['id']

    # ------------------------- table configuration area -------------------------
    st.subheader("3. 表格配置（勾选需要处理的表）")
    button1, button2 = st.columns(2)
    with button1:
        # BUG FIX: these buttons previously wrote to "check_{table}" while the
        # checkboxes below are keyed "check_{database_id}_{table}", so select
        # all / invert had no effect. Write to the actual widget keys.
        if st.button("全选"):
            for table in all_tables:
                st.session_state[f"check_{database_id}_{table}"] = True

        if st.button("反选"):
            for table in all_tables:
                current = st.session_state.get(f"check_{database_id}_{table}", False)
                st.session_state[f"check_{database_id}_{table}"] = not current

    # Header row.
    cols = st.columns([1, 2, 3, 3, 2])
    cols[0].markdown("**勾选**")
    cols[1].markdown("**原表名称**")
    cols[2].markdown("**总条数**")
    cols[3].markdown("**目标表名称**")
    cols[4].markdown("**生成数量**")

    # Initialize session_state on first render.
    if "table_configs" not in st.session_state:
        st.session_state.table_configs = {}

    # One configuration row per table.
    for table in all_tables:
        cols = st.columns([1, 2, 3, 3, 2])

        # Checkbox, pre-filled from any previously saved config.
        checked = cols[0].checkbox(
            label=f'source_{database_id}_{table}',
            label_visibility="collapsed",
            value=st.session_state.table_configs.get(table, {}).get("checked", False),
            key=f"check_{database_id}_{table}"
        )
        # Source table name (read-only).
        cols[1].write(table)

        # Row count of the source table, used as the training size.
        train_count = sql_tool.count(table)
        cols[2].code(f"{train_count} 条")

        # Target table name, defaulting to the source table name.
        target_table = cols[3].text_input(
            label=f'target_{table}',
            value=table,
            key=f"target_{table}",
            label_visibility="collapsed"
        )

        # Rows to generate, pre-filled from a saved value or the batch default.
        gen_rows = cols[4].number_input(
            label=f'target_gen_{table}',
            min_value=1,
            value=st.session_state.table_configs.get(table, {}).get("gen_rows", default_generate_rows),
            key=f"rows_{table}",
            label_visibility="collapsed"
        )

        # Persist this row's configuration.
        st.session_state.table_configs[table] = {
            "checked": checked,
            "target_table": target_table,
            "gen_rows": gen_rows
        }


def page_source_target(data_sources, st):
    """Render the source/target pickers.

    Args:
        data_sources: list of registered data-source records.
        st: the streamlit module.

    Returns:
        Tuple of (selected source name, selected target name, write mode).
    """
    st.subheader("1. 选择数据源")
    src_col, _unused = st.columns(2)
    chosen_name = src_col.selectbox("选择源数据源", [entry["name"] for entry in data_sources])
    chosen_source = get_data_source_by_name(data_sources, chosen_name)

    st.subheader("2. 目标数据源设置")
    tgt_col, mode_col = st.columns(2)

    # Only sources of the same database type (plus CSV) are valid targets.
    candidates = get_data_sources_by_type(data_sources, chosen_source["type"])
    append_csv_data_source(candidates)
    candidate_names = [entry["name"] for entry in candidates]
    target_name = tgt_col.selectbox("选择目标数据源", candidate_names)
    mode = mode_col.radio("写入模式", ["覆盖", "追加"], horizontal=True)
    return chosen_name, target_name, mode






def sdv_create_data_from_database(datasource, table, st):
    """Fit a Gaussian Copula synthesizer on *table* and sample the number of
    rows configured in session_state.

    Args:
        datasource: source data-source record ('id', 'schema_name', ...).
        table: source table name.
        st: the streamlit module (for session_state access).

    Returns:
        A pandas DataFrame of synthetic rows.
    """
    # Look up the per-table config first so a missing entry fails fast.
    cfg = st.session_state.table_configs[table]

    database_id = datasource["id"]
    engine = get_sql_databaseTool(database_id=database_id).engine
    real_data = pd.read_sql_table(table, engine, schema=datasource['schema_name'])

    # Metadata was captured earlier and is read back from sqlite.
    metadata = get_sdv_metadata(database_id, table_name=table)

    # Example synthesizer; the locale makes generated fake text Chinese.
    model = GaussianCopulaSynthesizer(metadata, locales=['zh_CN'])
    model.fit(real_data)
    return model.sample(cfg["gen_rows"])










