"""
数据集的定义，用的tensorflow数据集
要求数据集能处理自己的数据
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset
import os
import torch

"""
荧光数据类型
包含成员
1. 三维光谱数据矩阵(ndarray类型) 2.ex_begin 3.ex_end 4.em_start,em_end 5. ex_step 6. em_step

包含成员函数
1. 扣除背景(传参self，背景三维荧光数据(DataType)
2. 扣除散射(散射带1，散射带2)，只用self传参
"""
# 规定数据要放缩到ex为 220 -> 450 250 ->500
class FluoDataType:
    """Container for a 3-D fluorescence (EEM) spectrum exported to Excel.

    Attributes:
        data: 2-D float32 ndarray; rows follow the emission axis, columns the
            excitation axis (established by the [em, ex] indexing below).
        ex_begin, ex_end, ex_step: excitation wavelength range/step (nm).
        em_begin, em_end, em_step: emission wavelength range/step (nm).
        file_path: path of the source Excel file.
    """

    def __init__(self, file_path, bg_file_path):
        """Load a spectrum, subtract background, remove scatter, crop to the
        standard grid (ex 220-450, em 250-500).

        NOTE(review): the original comment warned that paths must use '\\'
        separators or pattern matching breaks downstream — confirm with callers.

        Args:
            file_path: Excel file with the sample spectrum.
            bg_file_path: Excel file with the background spectrum.
        """
        raw = pd.read_excel(file_path).values
        # Everything above the "Data points" marker row is sheet metadata; skip it.
        raw = raw[np.where(raw == "Data points")[0][0] + 1:, :]
        # First remaining row holds the excitation axis, first column the emission axis.
        self.ex_begin = int(raw[0][1])
        self.ex_end = int(raw[0][-1])
        self.ex_step = int(raw[0][2] - raw[0][1])
        self.em_begin = int(raw[1][0])
        self.em_end = int(raw[-1][0])
        self.em_step = int(raw[2][0] - raw[1][0])
        self.data = np.array(raw[1:, 1:], dtype=np.float32)
        self.sub_back_ground(bg_file_path)
        self.sub_scatter()
        self.file_path = file_path
        self.unify_data()

    def get_ex(self):
        """Return (ex_begin, ex_end, ex_step)."""
        return self.ex_begin, self.ex_end, self.ex_step

    def get_em(self):
        """Return (em_begin, em_end, em_step)."""
        return self.em_begin, self.em_end, self.em_step

    def sub_back_ground(self, bg_file_path):
        """Subtract the background spectrum (same Excel layout) from self.data."""
        bgdata = pd.read_excel(bg_file_path).values
        # +2 skips both the marker row and the excitation-axis row; column 0 is
        # the emission axis — matching the slice taken from the sample sheet.
        bgdata = bgdata[np.where(bgdata == "Data points")[0][0] + 2:, 1:]
        self.data = np.array(self.data - bgdata, dtype=np.float32)

    def sub_scatter(self):
        """Clamp negative intensities to zero and blank scatter bands.

        First-order (Rayleigh) scatter: |em - ex| <= 10 nm; second-order:
        |em - 2*ex| <= 30 nm. A band cell is zeroed only when its intensity
        is >= 500, matching the original per-element logic. Vectorized
        replacement for the former O(n*m) Python double loop.
        """
        ex = np.arange(self.ex_begin, self.ex_end + 1, self.ex_step)
        em = np.arange(self.em_begin, self.em_end + 1, self.em_step)
        # Negatives are clamped first, exactly as the element-wise original did.
        np.clip(self.data, 0, None, out=self.data)
        ex_grid = ex[np.newaxis, :]   # shape (1, n_ex)
        em_grid = em[:, np.newaxis]   # shape (n_em, 1)
        first_order = np.abs(em_grid - ex_grid) <= 10
        second_order = np.abs(em_grid - 2 * ex_grid) <= 30
        scatter = (first_order | second_order) & (self.data >= 500)
        self.data[scatter] = 0

    def draw_countour(self, pic_name, pic_long=12.8, pic_height=7.2, save_path=None):
        """Draw (and optionally save) a filled contour plot of the spectrum.

        Args:
            pic_name: plot title.
            pic_long, pic_height: figure size in inches.
            save_path: if given, save the figure there instead of showing it.
        """
        plt.rcParams['figure.figsize'] = (pic_long, pic_height)
        ex = np.arange(self.ex_begin, self.ex_end + 1, self.ex_step)
        em = np.arange(self.em_begin, self.em_end + 1, self.em_step)
        ex, em = np.meshgrid(ex, em)
        fig = plt.figure()
        plt.contourf(ex, em, self.data)
        plt.contour(ex, em, self.data)
        plt.tick_params(labelsize=16)
        plt.title(pic_name, fontdict={"family": "Times New Roman"}, fontsize=32)
        plt.xlabel('Ex', fontdict={"family": "Times New Roman"}, fontsize=28)
        plt.ylabel('Em', fontdict={"family": "Times New Roman"}, fontsize=28)

        if save_path is not None:
            plt.savefig(save_path)
            print("已保存"+pic_name+"图片,保存路径为："+save_path)
        else:
            plt.show()
        plt.close()

    def unify_data(self, ex_begin=220, ex_end=450, em_begin=250, em_end=500):
        """Crop self.data to the standard wavelength grid (steps unchanged).

        Raises:
            ValueError: if the requested range is not present in this spectrum
                (the original code crashed with an UnboundLocalError instead).
        """
        ex = np.arange(self.ex_begin, self.ex_end + 1, self.ex_step)
        em = np.arange(self.em_begin, self.em_end + 1, self.em_step)
        try:
            i_begin = int(np.where(em == em_begin)[0][0])
            i_end = int(np.where(em == em_end)[0][0])
            j_begin = int(np.where(ex == ex_begin)[0][0])
            j_end = int(np.where(ex == ex_end)[0][0])
        except IndexError:
            raise ValueError(
                "unify_data: requested range ex[%d,%d] em[%d,%d] not contained "
                "in this spectrum" % (ex_begin, ex_end, em_begin, em_end)
            )
        self.data = self.data[i_begin:i_end + 1, j_begin:j_end + 1]
        self.ex_begin = ex_begin
        self.ex_end = ex_end
        self.em_begin = em_begin
        self.em_end = em_end

"""
CNN的基本的数据集
要求包含所有的训练数据
要求重写以下这三个函数
"""
class MyDataSet(Dataset):
    """Dataset of fluorescence spectra stored as Excel files under root_dir/label.

    Each sample is loaded as a FluoDataType (background-subtracted,
    scatter-removed, cropped) and returned as a float32 tensor with its label.
    """

    def __init__(self, root_dir, label, bg_filename="background.xlsx"):
        """
        Args:
            root_dir: directory containing one sub-directory per class plus the
                shared background spectrum file.
            label: class sub-directory name; also returned as the sample label.
            bg_filename: name of the background Excel file inside root_dir.
                Default preserves the previously hard-coded behaviour.
        """
        self.root_dir = root_dir
        self.label = label
        self.dir_path = os.path.join(root_dir, label)
        # Sort for a deterministic index -> file mapping (os.listdir order is arbitrary).
        self.data_list = sorted(os.listdir(self.dir_path))
        # Resolve the shared background path once, not on every __getitem__ call.
        self.bg_path = os.path.join(root_dir, bg_filename)

    def __getitem__(self, idx):
        """Load sample idx and return (feature_tensor, label)."""
        file_path = os.path.join(self.dir_path, self.data_list[idx])
        feature = torch.from_numpy(FluoDataType(file_path, self.bg_path).data)
        return feature, self.label

    def __len__(self):
        """Number of spectrum files in the class directory."""
        return len(self.data_list)

class EmptyDataSet(Dataset):
    """A dataset that deliberately holds zero samples.

    Acts as a neutral placeholder wherever a Dataset instance is required
    but no data should ever be served.
    """

    def __getitem__(self, idx):
        # Every index is out of range for an empty dataset.
        raise IndexError("EmptyDataSet can not be used")

    def __len__(self):
        return 0





