import random
import time

import numpy as np
import torch
from scipy.io import loadmat

class Checker():
    """Checks whether an RF fingerprint (RFF) sample matches a claimed device ID.

    On construction this loads reference exemplars from training .mat files and
    a serialized torch model; `checkValid` then compares an incoming RFF
    against the stored exemplars for a target ID and majority-votes the result.

    Parameters:
        data_train_path: list of (mat_path, mat_key) tuples for training data.
        id_list: device IDs to build reference exemplars for.
        model_path: path to the torch model file (loaded via torch.load).
        use_GPU: run the model on CUDA when True, else on CPU.
        vote_num: number of reference exemplars used per validity check.
        muti_check_num: window size callers use for consecutive-agreement checks.
        overlay_num: number of consecutive frames averaged into one exemplar.
    """

    def __init__(self, data_train_path=None, id_list=None, model_path='',
                 use_GPU=False, vote_num=10, muti_check_num=2, overlay_num=1):
        # None instead of mutable [] defaults: avoids state shared across calls.
        self.data_train_path = [] if data_train_path is None else data_train_path
        self.vote_num = vote_num
        self.muti_check_num = muti_check_num
        self.model_path = model_path
        self.overlay_num = overlay_num
        self.id_list = [] if id_list is None else id_list
        self.use_GPU = use_GPU
        self.load_train_data()
        self.load_model()

    def iq_nn_inputs(self, data_in):
        """Convert complex samples of shape (N, L) into the NN input layout
        (N, 1, 2, L): channel 0 = real part, channel 1 = imaginary part."""
        data_out = np.empty((data_in.shape[0], 1, 2, data_in.shape[1]), dtype=np.float32)
        # Vectorized over the whole batch instead of a per-row Python loop.
        data_out[:, 0, 0, :] = np.real(data_in)
        data_out[:, 0, 1, :] = np.imag(data_in)
        return data_out

    def overlay_list(self, data):
        """Return the sliding-window mean of `overlay_num` consecutive samples
        along axis 0; output length is len(data) - overlay_num + 1."""
        n = self.overlay_num
        return np.array([np.mean(data[i:i + n], axis=0)
                         for i in range(len(data) - n + 1)])

    def load_train_data(self):
        """Load the training .mat files and build `vote_num` randomly chosen
        NN-formatted reference exemplars per ID in `id_list`."""
        (path, key_name) = self.data_train_path[0]
        data = loadmat(path)[key_name]
        for (path_append, key_append) in self.data_train_path[1:]:
            data_append = loadmat(path_append)[key_append]
            data = np.concatenate((data_append, data), axis=0)
        # Last column carries the (real-valued) device label; the rest is IQ data.
        y_data_train = np.real(data[:, -1])
        data = data[:, :-1]
        data_train_dict = {}
        for y in self.id_list:
            x_train = self.overlay_list(data[y_data_train == int(y)])
            # Pick vote_num distinct exemplars at random for this device.
            indexList = random.sample(range(len(x_train)), self.vote_num)
            self.indexList = indexList
            x_train = x_train[indexList]
            data_train_dict[y] = self.iq_nn_inputs(x_train)
        self.data_train_dict = data_train_dict

    def load_model(self):
        """Load the serialized model and move it to the selected device."""
        model_save_path = self.model_path
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # model files from a trusted source.
        self.model = torch.load(model_save_path)
        print('load model: ', model_save_path)
        self.device = torch.device("cuda") if self.use_GPU else torch.device("cpu")
        self.model.to(self.device)

    def checkValid(self, rff, target_id):
        """Return 1 if `rff` is judged to belong to `target_id`, else 0.

        The RFF is replicated vote_num times and compared pairwise against the
        stored exemplars for `target_id`; the verdict is a majority vote over
        the per-pair predictions (prediction > 0 counts as a match).
        """
        rff = torch.tensor(np.array([rff] * self.vote_num))
        target_data = torch.tensor(self.data_train_dict[target_id])
        # Third argument (-0.5) is a model-specific constant — presumably a
        # gradient-reversal/domain-adaptation coefficient; TODO confirm.
        predict_y, domain_output, f1, f2 = self.model(
            rff.to(self.device).float(), target_data.to(self.device).float(), -0.5)
        label_pred = np.where(predict_y.cpu().data.numpy() > 0, 1, 0)
        return 1 if sum(label_pred) > self.vote_num / 2 else 0
def iq_nn_inputs(data_in):
    """Convert complex samples of shape (N, L) into the real-valued NN input
    layout (N, 1, 2, L): channel 0 holds the real part, channel 1 the
    imaginary part of each sample."""
    data_out = np.empty((data_in.shape[0], 1, 2, data_in.shape[1]), dtype=np.float32)
    # Vectorized batch assignment replaces the original per-row Python loop.
    data_out[:, 0, 0, :] = np.real(data_in)
    data_out[:, 0, 1, :] = np.imag(data_in)
    return data_out
def load_file_test(path_list=None):
    """Load and concatenate test .mat files, shuffle them, and group the IQ
    samples by device label.

    Parameters:
        path_list: list of (mat_path, mat_key) tuples. In each loaded matrix
            the last column is the (real-valued) device label and the
            remaining columns are the IQ samples.

    Returns:
        dict mapping each label found in the data to its NN-formatted samples
        (see iq_nn_inputs).
    """
    # None instead of a mutable [] default: avoids state shared across calls.
    path_list = [] if path_list is None else path_list
    print(path_list)
    (path, key_name) = path_list[0]
    data = loadmat(path)[key_name]
    # Each subsequent file is prepended, matching the original concatenation order.
    for (path_append, key_append) in path_list[1:]:
        data_append = loadmat(path_append)[key_append]
        data = np.concatenate((data_append, data), axis=0)
    np.random.shuffle(data)
    y_data_test = np.real(data[:, -1])  # labels live in the last column
    data = data[:, :-1]

    print("data size:", len(y_data_test))

    x_len_test = []
    x_test_dict = {}
    class_list = np.unique(y_data_test)
    print("data class", class_list)
    for y in class_list:
        _x_test = data[y_data_test == int(y)]
        x_test_dict[y] = iq_nn_inputs(_x_test)
        x_len_test.append(len(_x_test))
    print(x_len_test)
    return x_test_dict
# Paths of the local reference-frame (training) files.
path_list_train = [('1029_rff_ddd_raw.mat', 'rff')]

checker = Checker(
    data_train_path=path_list_train,
    model_path='model_172919.pt',
    id_list=[1, 3, 4, 5, 6, 7, 9, 10, 12, 13],  # device IDs under test
    overlay_num=40,
    vote_num=1,
    muti_check_num=1,
    use_GPU=True
)
# Path of the test file.
path_list_test = [('1101_rff_ddd_raw.mat', 'rff')]
x_test_dict = load_file_test(path_list_test)
id_list = checker.id_list
device_acc = []
for target_id in id_list:  # renamed from `id` — don't shadow the builtin
    invalid_acc = []
    for y in x_test_dict:
        x_test_data = checker.overlay_list(x_test_dict[y])
        output_list = []
        temp_list = []  # sliding window of the last muti_check_num verdicts
        for rff in x_test_data:
            output = checker.checkValid(rff, target_id)
            # Count a verdict only if it agrees with every verdict in the
            # recent window (output*len == sum  <=>  all entries equal output);
            # the very first verdict is always counted.
            if temp_list:
                if output * len(temp_list) == sum(temp_list):
                    output_list.append(output)
            else:
                output_list.append(output)
            if len(temp_list) == checker.muti_check_num:
                del temp_list[0]
            temp_list.append(output)
        acc = sum(output_list) / len(output_list)
        if y == target_id:
            valid_acc = acc              # accept rate on the genuine device
        else:
            invalid_acc.append(1 - acc)  # reject rate on an impostor device
    # NOTE(review): valid_acc is undefined if target_id never appears in the
    # test data — presumably id_list is a subset of the test labels; verify.
    invalid_acc = sum(invalid_acc) / len(invalid_acc)
    print(f'target_id: {target_id} valid: {valid_acc} invalid: {invalid_acc} mean: {(valid_acc+invalid_acc)/2}')
    device_acc.append((valid_acc + invalid_acc) / 2)
all_acc = sum(device_acc) / len(device_acc)
print(f'all acc {all_acc}')





