import csv
import pandas as pd
import pandas as pd
import numpy as np
from pgmpy.estimators import BDeuScore, K2Score, BicScore
from pgmpy.models import BayesianNetwork
import json
from Hive.DAG_update import DataConversion
import os
from colorama import Fore

# Best-known network vector from a prior structure search, laid out as the
# 10 rows of a flattened 10x10 adjacency matrix (row-major).
# NOTE(review): presumably encodes edges for the DREAM4 size-10 network —
# confirm the ordering against DataConversion.matrix2vector.
best_vector = [
    0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
    0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0,
    0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0,
    0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0,
    0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0,
    0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
    0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0,
]


class DataName:
    """Shared column labels for the DREAM4 size-10 data sets."""

    # Gene labels G1..G10 — the ten nodes of the size-10 network.
    ColumnNames = [f"G{i}" for i in range(1, 11)]


def read_Dream4_time_series_data_by_name(name, n_samples=102):
    """Load a DREAM4 time-series TSV and build a lag-embedded data matrix.

    The file is read as tab-separated, NaN rows are dropped, and the first
    data row and first column (the time axis) are discarded. Three
    consecutive-time slices (rows t, t+1, t+2) are then concatenated
    column-wise, yielding an array of shape (n_samples, 3 * n_genes).

    Args:
        name: Path of the TSV file to read.
        n_samples: Number of lag-embedded rows to produce. Defaults to 102,
            matching the original hard-coded slice bounds (0:102 / 1:103 /
            2:104), so existing callers see identical output.

    Returns:
        numpy.ndarray of shape (n_samples, 3 * n_genes), dtype float.

    Raises:
        ValueError: If the file has fewer than n_samples + 2 usable rows.
    """
    data_path = name
    print(f"read data from {data_path}")
    df = pd.read_csv(data_path, sep="\t")
    df = df.dropna()
    # Drop the first remaining row and the first (time) column.
    df = df.iloc[1:, 1:]
    data = df.values.astype(float)
    if data.shape[0] < n_samples + 2:
        # The original silently produced ragged slices here and failed later
        # inside concatenate; fail early with a clear message instead.
        raise ValueError(
            f"need at least {n_samples + 2} usable rows, got {data.shape[0]}"
        )
    # Three consecutive lags side by side: columns for t, t+1 and t+2.
    data1 = data[0:n_samples, :]
    data2 = data[1:n_samples + 1, :]
    data3 = data[2:n_samples + 2, :]
    gen_data = np.concatenate((data1, data2, data3), axis=1)
    return gen_data


def single_display():
    """Print BDeu local scores for both orientations of two candidate edges.

    Loads the DREAM4 data via read_Dream4_data(), scores G2<->G1 and
    G7<->G10 in each direction with a BDeu score, and prints the results.

    NOTE(review): the original first called
    read_data_file(DataName.Task1, DataName.Heterozygous, without_noise=True)
    and immediately overwrote the result; DataName defines no Task1 /
    Heterozygous attributes in this file, so that call would raise
    AttributeError. The dead line and the redundant function-local imports
    (duplicates of the module-level ones) were removed.
    """
    data = read_Dream4_data()
    data = pd.DataFrame(data, columns=DataName.ColumnNames)
    bdeu = BDeuScore(data, equivalent_sample_size=5)
    # Gold-standard structure of the DREAM4 size-10 network, kept as
    # in-code documentation of the true edges (not scored below).
    model_gold = BayesianNetwork(
        [
            ("G2", "G1"),
            ("G2", "G3"),
            ("G3", "G4"),
            ("G9", "G4"),
            ("G3", "G5"),
            ("G8", "G5"),
            ("G9", "G5"),
            ("G3", "G6"),
            ("G3", "G7"),
            ("G8", "G7"),
            ("G10", "G7"),
        ]
    )
    # Score each direction of the two edges of interest; identical output
    # to the original four copy-pasted print blocks.
    for parent_name, child_name in [
        ("G2", "G1"),
        ("G1", "G2"),
        ("G7", "G10"),
        ("G10", "G7"),
    ]:
        print(
            f"bdeu {parent_name} -> {child_name}: {bdeu.local_score(child_name, parents=[parent_name])}"
        )


def save_train_info(save_info, save_path):
    """Serialize a training-info dict to *save_path* as indented JSON.

    numpy arrays among the values are converted to nested lists so the
    standard json encoder can handle them. The conversion happens in place,
    mutating *save_info* — preserved from the original behavior.

    Args:
        save_info: Mapping of JSON-serializable values and/or numpy arrays.
        save_path: Destination file path (overwritten if it exists).
    """
    for key, value in save_info.items():
        # isinstance (rather than type(...) ==) also catches ndarray subclasses.
        if isinstance(value, np.ndarray):
            save_info[key] = value.tolist()
    with open(save_path, "w") as f:
        json.dump(save_info, f, indent=4)


def single_this_test():
    """Rebuild the lag-embedded time-series data and export it to Excel.

    Columns are labeled A1..A10, B1..B10, C1..C10 — one group per time lag.
    Writes output/data/rebuild.xlsx (directory must already exist).
    """
    frame = pd.DataFrame(
        read_Dream4_time_series_data(),
        columns=[f"{lag}{i}" for lag in ("A", "B", "C") for i in range(1, 11)],
    )
    frame.to_excel("output/data/rebuild.xlsx", index=False)


def read_ground_truth(idx=1, type="vector"):
    """Read a DREAM4 size-10 gold-standard network from its TSV file.

    Args:
        idx: Which gold standard to read (1-5), selecting the file
            insilico_size10_{idx}_goldstandard.tsv.
        type: "matrix" returns the 10x10 numpy adjacency matrix;
            "vector" returns it flattened via DataConversion.matrix2vector.
            (The name shadows the builtin `type`; kept for backward
            compatibility with keyword callers.)

    Returns:
        numpy.ndarray adjacency matrix, or the flattened vector.

    Bug fix: the vector branch originally compared against the misspelling
    "vertor", so the default type="vector" matched nothing and silently
    returned None.
    """
    file_name = f"DREAM4 in silico challenge/Size 10/DREAM4 gold standards/insilico_size10_{idx}_goldstandard.tsv"
    df = pd.read_csv(
        file_name, sep="\t", header=None, names=["source", "target", "value"]
    )
    # All node labels.
    nodes = [f"G{i}" for i in range(1, 11)]
    # Start from an empty (all-zero) adjacency matrix.
    adj_matrix = pd.DataFrame(
        np.zeros((len(nodes), len(nodes)), dtype=int), index=nodes, columns=nodes
    )

    # Fill in the edges listed in the gold-standard file.
    for _, row in df.iterrows():
        adj_matrix.loc[row["source"], row["target"]] = row["value"]
    adj_matrix = adj_matrix.values
    if type == "matrix":
        return adj_matrix
    elif type == "vector":  # was "vertor" — the default arg never matched
        ground_vector = DataConversion.matrix2vector(adj_matrix)
        return ground_vector


if __name__ == "__main__":
    # Script entry point: regenerate the rebuilt time-series spreadsheet.
    # Other one-off drivers in this module (read_Dream4_data, eval_test,
    # single_display) can be swapped in here manually when needed.
    single_this_test()
