from modelSource import Multi_featureV2_3_modify, SepConv1D
from loadDataset import Load2022Dataset, LoadMachineData
import mat73
import numpy as np
import torch
from config import MyConfig
from other import SignalDispose
import random

# Prefer the GPU when one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

def detect_letter_P3speller(
    pred_score, word_len, label, letter_ind, markers_seq, params
):
    """Decode spelled letters from per-trial classifier scores.

    For each repetition count 1..full_repeat, the scores of every letter's
    flash trials are accumulated per stimulus code (codes 1-6 are rows,
    7-12 are columns); the highest-scoring row/column pair indexes the
    speller matrix.

    Args:
        pred_score: per-trial classifier scores, ordered like letter_ind.
        word_len: number of letters spelled in the session.
        label: ground-truth string the subject was asked to spell.
        letter_ind: indices of the flash trials within markers_seq.
        markers_seq: stimulus-code sequence (values from params["seq_code"]).
        params: dict with "seq_code", "full_repeat" and "spellermatrix".

    Returns:
        dict with the decoded string ("text_result") and per-repetition
        correct-letter counts / accuracies (numpy arrays of length
        full_repeat).
    """
    n_codes = len(params["seq_code"])
    # Plain list of single chars; replaces the deprecated np.chararray.
    user_answer = [""] * word_len
    acc_on_repetition = np.zeros(params["full_repeat"])
    correct_on_repetition = np.zeros(params["full_repeat"])
    for n_repeat in range(params["full_repeat"]):
        for n_letter in range(word_len):
            # Trial span covering the first (n_repeat + 1) repetitions of
            # this letter; each letter owns n_codes * full_repeat trials.
            begin_trial = n_codes * params["full_repeat"] * n_letter
            end_trial = begin_trial + (n_repeat + 1) * n_codes

            unknown_speller_code = np.zeros(n_codes)
            for j in range(begin_trial, end_trial):
                # Accumulate the classifier score per stimulus code.
                unknown_speller_code[int(markers_seq[letter_ind[j]]) - 1] += (
                    pred_score[j]
                )

            row = np.argmax(unknown_speller_code[0:6])
            col = np.argmax(unknown_speller_code[6:12])
            user_answer[n_letter] = params["spellermatrix"][row * 6 + col]
        user_answer_string = "".join(user_answer)

        correct_on_repetition[n_repeat] = len(
            [i for i, j in zip(user_answer_string, label) if i == j]
        )
        acc_on_repetition[n_repeat] = correct_on_repetition[n_repeat] / len(label)

    out = {
        "text_result": user_answer_string,
        "acc_on_repetition": acc_on_repetition,
        "correct_on_repetition": correct_on_repetition,
    }
    return out


def predict2022DataSet(dataFilePath: str, modelFilePath: str):
    """Run a saved P300-speller model over the 2022 dataset test sessions.

    Loads the MAT file at *dataFilePath*, band-pass filters each test
    session, epochs the flash trials, scores every trial with the model
    loaded from *modelFilePath*, then prints the decoded text and the
    accuracy reached at the final repetition.
    """
    spellerMatrix = ['A', 'B', 'C', 'D', 'E', 'F',
                     'G', 'H', 'I', 'J', 'K', 'L',
                     'M', 'N', 'O', 'P', 'Q', 'R',
                     'S', 'T', 'U', 'V', 'W', 'X',
                     'Y', 'Z', '1', '2', '3', '4',
                     '5', '6', '7', '8', '9', '_']
    Config_P3speller = {
        "seq_code": range(1, 13),   # 12 stimulus codes: 6 rows + 6 columns
        "full_repeat": 15,          # flash repetitions per letter
        "spellermatrix": spellerMatrix,
    }
    Params_P3speller = {
        "freq": [0.5, 10],          # band-pass cut-offs in Hz
        "frame": [0, 600],          # epoch window (ms, relative to flash)
        "baseline": [-200, 0],      # baseline window (ms)
        "select_ch": (16, 19, 31),  # EEG channel indices fed to the model
    }
    EEG = mat73.loadmat(dataFilePath)
    # map_location lets a model saved on GPU load on a CPU-only host.
    model = torch.load(modelFilePath, map_location=device).to(device)
    for n_test in range(len(EEG["test"])):
        cur_eeg = EEG["test"][n_test]
        data = np.asarray(cur_eeg["data"])

        srate = cur_eeg["srate"]
        low_cut, high_cut = Params_P3speller["freq"]
        data = SignalDispose.butter_bandpass_filter(data, low_cut, high_cut, srate, 4)
        word_len = int(
            cur_eeg["nbTrials"]
            / (len(Config_P3speller["seq_code"]) * Config_P3speller["full_repeat"])
        )

        markers_seq = cur_eeg["markers_seq"]
        letter_idx = np.where(np.isin(markers_seq, Config_P3speller["seq_code"]))[0]
        # ch x time x trial
        unknownEEG = SignalDispose.extractEpoch3D(
            data,
            letter_idx,
            srate,
            Params_P3speller["baseline"],
            Params_P3speller["frame"],
            False,
        )
        # Select the three channels and duplicate them so the model sees a
        # 6-channel input; transpose gives trial x channel x time.
        ch_a, ch_b, ch_c = Params_P3speller["select_ch"]
        datas = np.array(
            [
                unknownEEG[ch_a, :, :],
                unknownEEG[ch_b, :, :],
                unknownEEG[ch_c, :, :],
                unknownEEG[ch_a, :, :],
                unknownEEG[ch_b, :, :],
                unknownEEG[ch_c, :, :],
            ]
        )
        datas = np.transpose(datas, (2, 0, 1))
        datas = torch.tensor(datas, dtype=torch.float32)
        output = []
        for i in range(datas.shape[0]):
            # One trial at a time, shaped (1, 1, channel, time).
            pred = model(datas[i, :, :].unsqueeze(0).unsqueeze(0).to(device))
            output.append(torch.argmax(pred).item())

        ans_letters = detect_letter_P3speller(
            output,
            word_len,
            cur_eeg["text_to_spell"],
            letter_idx,
            markers_seq,
            Config_P3speller,
        )
        cur_text_result = ans_letters["text_result"]

        print(
            f"User answer: {cur_text_result} ({int(ans_letters['correct_on_repetition'][-1])}/{int(word_len)}), accuracy: {ans_letters['acc_on_repetition'][-1]}"
        )

def predictMachine(myConfig: MyConfig.MyConfig):
    """Decode characters from machine-recorded P300 data with a saved model.

    Each character is flashed over 10 repetitions of 12 stimuli (6 rows +
    6 columns).  Per repetition the model votes on the flashed stimuli;
    the winning row/column pair picks a character, and the most frequent
    character across the 10 repetitions becomes the answer.  Three
    randomly chosen template channels from the 2022 training set are
    stacked with the three recorded channels to build the model input.
    """
    dataset = Load2022Dataset.Load2022Dataset(myConfig.modelDataSetRootPath, True)
    tarData, _ = dataset.getDataNdarray()
    # Pick one random recording as the template-signal source.
    tarData = tarData[random.randint(0, tarData.shape[0] - 1), :, :, :]
    tarDataChannel = myConfig.dataAChannel

    # Randomly pick 12 template trials, one per stimulus slot.
    choiceSingal = tarData[:, :, [random.randint(0, tarData.shape[2] - 1) for _ in range(12)]]

    # map_location lets a model saved on GPU load on a CPU-only host.
    model = torch.load(myConfig.modelFilePath, map_location=device).to(device)
    datasetSingle = LoadMachineData.LoadMachineDataSet(
        myConfig.machineDataFile,
        myConfig.dataBChannel,
        myConfig.machineDataLabel,
        myConfig.machineDataTargetWordDict,
        myConfig.machineDataCSVDelimiter,
        True
    )
    signalData, markInfo = datasetSingle.getDataNdarray()

    repeatTimes = int(markInfo.shape[0] / 12)
    # Every 12 flashes form one repetition; drop a trailing partial one.
    if markInfo.shape[0] % 12 != 0:
        markInfo = markInfo[:repeatTimes * 12]
        signalData = signalData[:, :, :repeatTimes * 12]

    ansList = []
    # One character per 120 flashes (10 repetitions x 12 stimuli).
    for i in range(int(markInfo.shape[0] / 120)):
        ansDict = {}
        SingleWordSignal = signalData[:, :, i * 120:(i + 1) * 120]
        SingleWordMark = markInfo[i * 120:(i + 1) * 120]
        for j in range(10):
            index = [0] * 12
            SingleSignal = SingleWordSignal[:, :, j * 12:(j + 1) * 12]
            SingleMark = SingleWordMark[j * 12:(j + 1) * 12]
            # Stack 3 template + 3 recorded channels -> [12, 6, time].
            datas = np.array(
                [
                    choiceSingal[tarDataChannel[0], :, :],
                    choiceSingal[tarDataChannel[1], :, :],
                    choiceSingal[tarDataChannel[2], :, :],
                    SingleSignal[0, :, :],
                    SingleSignal[1, :, :],
                    SingleSignal[2, :, :]
                ]
            )
            datas = np.transpose(datas, (2, 0, 1))
            datas = torch.tensor(datas, dtype=torch.float32)
            datas = datas.unsqueeze(1).to(device)
            pred = model(datas)
            for k in range(len(pred)):
                if pred[k].argmax().item() == 1:
                    # marker % 100 encodes the 1-based stimulus code.
                    index[int(SingleMark[k] % 100) - 1] += 1
            if all(x == 0 for x in index):
                # No stimulus got a positive vote in this repetition.
                continue
            row = np.argmax(index[0:6])
            col = np.argmax(index[6:12])
            character = LoadMachineData.LoadMachineDataSet.targetWordMatrix[row][col]
            print(f"第{i}个字符，第{j}次预测: 行为{row}，列为{col}，字符为{character}")
            ansDict[character] = ansDict.get(character, 0) + 1
        if ansDict:
            ansList.append(max(ansDict, key=ansDict.get))
        else:
            # All 10 repetitions were vote-less; the original code would
            # raise ValueError on max() of an empty dict here.
            ansList.append("?")
        print("-----------------------------------------")
    print(f"最终预测的字符为：{ansList}")

