import matplotlib.pyplot as plt
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
import torch.nn.functional as F



#定义一维卷积神经网络模型

class UNet1D(nn.Module):
    """Encoder-decoder 1-D conv net emitting a per-position probability map.

    Despite the name there are no skip connections. The encoder halves the
    temporal length with max pooling; the final transposed convolution
    (stride 2, output_padding 1) upsamples back — an odd input length L is
    restored exactly, an even one comes back one sample longer. A sigmoid
    squashes the single output channel into [0, 1].
    """

    def __init__(self, in_channels):
        super(UNet1D, self).__init__()

        # Encoder: two conv+BN+ReLU stages, then downsample by 2.
        self.encoder = nn.Sequential(
            nn.Conv1d(in_channels, 64, kernel_size=3, padding=1),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
            nn.Conv1d(64, 256, kernel_size=3, padding=1),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2),
        )

        # Decoder: shrink channels 256 -> 128 -> 64 -> 8, then upsample
        # back to (roughly) the input length with a transposed convolution.
        self.decoder = nn.Sequential(
            nn.Conv1d(256, 128, kernel_size=3, padding=1),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.Conv1d(128, 64, kernel_size=3, padding=1),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
            nn.Conv1d(64, 8, kernel_size=3, padding=1),
            nn.BatchNorm1d(8),
            nn.ReLU(inplace=True),
            nn.ConvTranspose1d(8, 1, kernel_size=2, stride=2, output_padding=1),
        )

    def forward(self, x):
        """Map (N, in_channels, L) input to (N, 1, L') probabilities."""
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return torch.sigmoid(decoded)





# 定义数据集类
class CustomDataset(Dataset):
    """Pairs each ``<name>.txt`` 1-D signal with ``<name>_max_ids.txt`` peak indices.

    The signal is min-max normalized to [0, 1]; the label is a binary mask of
    the same length, with a 5-sample window (index +/- 2) set to 1 around
    every peak index listed in the label file.
    """

    def __init__(self, root_dir, threshold=1e-6):
        self.root_dir = root_dir
        # Derive the shared base name from every label file present.
        self.pair_files = [f[:-12] for f in os.listdir(root_dir)
                           if f.endswith('_max_ids.txt')]
        # Minimum value range required to normalize; below this the signal
        # is treated as constant. (Originally stored but never used.)
        self.threshold = threshold

    def __len__(self):
        return len(self.pair_files)

    def __getitem__(self, idx):
        base = self.pair_files[idx]
        txt_filename = base + '.txt'
        label_filename = base + '_max_ids.txt'

        txt_file = os.path.join(self.root_dir, txt_filename)
        label_file = os.path.join(self.root_dir, label_filename)

        data = np.loadtxt(txt_file).astype(np.float32)

        # Min-max normalize to [0, 1]. Compute min/max once and guard
        # against a (near-)constant signal, which previously divided by zero.
        dmin = np.min(data)
        span = np.max(data) - dmin
        if span > self.threshold:
            data = (data - dmin) / span
        else:
            data = np.zeros_like(data)

        # Labels may be written in scientific notation; round each value to
        # the nearest integer index.
        labels_sci = np.loadtxt(label_file).astype(np.float32)

        # A single-value file loads as a 0-d array; make it iterable.
        if labels_sci.ndim == 0:
            labels_sci = np.array([labels_sci])

        label_indices = [int('{:.0f}'.format(label)) for label in labels_sci]

        # Mark a 5-wide window around each peak, clamped to the valid range.
        # FIX: the original used index-1/index-2 directly, so a peak at
        # index 0 or 1 wrapped around (negative indexing) and flagged the
        # END of the signal; index+1/index+2 could raise IndexError at the
        # right edge.
        binary_label = np.zeros_like(data)
        n = binary_label.shape[0]
        for index in label_indices:
            lo = max(index - 2, 0)
            hi = min(index + 3, n)
            binary_label[lo:hi] = 1

        sample = {
            'input': torch.from_numpy(data).unsqueeze(0),  # shape (1, L)
            'label': torch.from_numpy(binary_label),       # shape (L,)
            'file_name': txt_filename[:-4]  # base name without extension
        }

        return sample




def accuracy(outputs, labels):
    """Fraction of positions where the thresholded predictions match labels.

    Both tensors are expected shaped (N, 1, L); the channel dimension is
    dropped before comparison and predictions are binarized at 0.5.
    Returns a scalar tensor.
    """
    preds = outputs.gt(0.5).float().squeeze(1)
    targets = labels.squeeze(1)
    matches = preds.eq(targets).float()
    rows, cols = matches.size(0), matches.size(1)
    return matches.sum() / (rows * cols)




def train_model(model, dataloader, criterion, optimizer, num_epochs=10):
    """Run a standard training loop, printing mean loss/accuracy per epoch.

    Each batch must be a dict with 'input' and 'label'; the label gets a
    channel dimension inserted before the loss and accuracy computations.
    """
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        running_acc = 0.0

        for batch in dataloader:
            inputs = batch['input']
            labels = batch['label'].unsqueeze(1)

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Accumulate per-batch metrics.
            running_acc += accuracy(outputs, labels).item()
            running_loss += loss.item()

        n_batches = len(dataloader)
        average_loss = running_loss / n_batches
        average_accuracy = running_acc / n_batches

        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {average_loss:.4f}, Accuracy: {average_accuracy:.4f}')


def save_model(model, path):
    """Serialize the model's parameters (state_dict) to *path*."""
    state = model.state_dict()
    torch.save(state, path)

def load_model(model, path):
    """Restore parameters from *path* into *model* and switch it to eval mode."""
    state = torch.load(path)
    model.load_state_dict(state)
    model.eval()


def visualize_sample(inputs, labels, predictions, save_path):
    """Plot one 1-D sample with its true / predicted / overlapping peak points.

    inputs, labels, predictions are tensors holding a single sample (extra
    singleton dimensions are squeezed away). Predictions are binarized at
    0.5. The figure is written to ``save_path`` and then cleared.
    """
    # Convert the input signal to a numpy array.
    inputs_np = inputs.squeeze().numpy()

    # Convert labels and thresholded predictions to numpy arrays.
    labels_np = labels.squeeze().numpy()

    predictions_np = (predictions > 0.5).float().squeeze().numpy()

    # Draw the signal itself.
    plt.plot(inputs_np, label='Curve')

    # Indices marked 1 in the ground-truth / prediction masks.
    true_points = np.where(labels_np == 1)[0]
    pred_points = np.where(predictions_np == 1)[0]

    # Scale the x-coordinates of the scatter points.
    # NOTE(review): this shifts markers off their true indices unless
    # len(inputs_np) == 255 — presumably a leftover from fixed-length data;
    # confirm whether the scaling is still wanted.
    scale_factor = len(inputs_np) / 255.0

    plt.scatter(true_points * scale_factor, inputs_np[true_points], color='red', s=10, label='True Points')
    plt.scatter(pred_points * scale_factor, inputs_np[pred_points], color='green', s=10, label='Predicted Points')

    # Points that are both true and predicted are re-drawn on top in purple.
    common_points = np.where((labels_np == 1) & (predictions_np == 1))[0]
    plt.scatter(common_points * scale_factor, inputs_np[common_points], color='purple', s=10, label='Common Points')

    plt.legend()

    # Save the figure to the requested path.
    plt.savefig(save_path)

    # Clear the current figure so the next sample starts fresh.
    plt.clf()


def test_model(model, dataloader, criterion, save_dir):
    """Evaluate the model on *dataloader*, print mean loss/accuracy, and save
    one visualization per batch into *save_dir* (created if missing).

    Assumes batch_size=1: the visualization file name is taken from the
    first (only) element of each batch's 'file_name' list.
    """
    model.eval()
    total_loss = 0.0
    total_accuracy = 0.0

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    with torch.no_grad():
        for i, batch in enumerate(dataloader):
            inputs = batch['input']
            labels = batch['label']
            # Insert the channel dimension expected by the loss and metrics.
            labels = labels.unsqueeze(1)


            outputs = model(inputs)

            loss = criterion(outputs, labels)




            # Calculate accuracy
            acc = accuracy(outputs, labels)
            total_accuracy += acc.item()
            total_loss += loss.item()

            # Visualize and save the sample
            # Get the original file name from the batch
            file_name = batch['file_name'][0]

            save_path = os.path.join(save_dir, file_name)

            visualize_sample(inputs, labels, outputs, save_path)

    average_loss = total_loss / len(dataloader)
    average_accuracy = total_accuracy / len(dataloader)

    print(f'Test Loss: {average_loss:.4f}, Test Accuracy: {average_accuracy:.4f}')



################### Build datasets and dataloaders

train_dir = "C:/Users/SONGSHITAO/Desktop/1d_data/data/train/"
train_dataset = CustomDataset(train_dir)
train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True)


test_dir = "C:/Users/SONGSHITAO/Desktop/1d_data/data/test/"
test_dataset = CustomDataset(test_dir)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False)

# Build the model, loss, and optimizer. The input data has a single channel.
model = UNet1D(in_channels=1)

# FIX: UNet1D.forward already applies a sigmoid, so BCEWithLogitsLoss (which
# applies its own sigmoid internally) was squashing the probabilities a
# second time, corrupting the loss and its gradients. BCELoss is the correct
# criterion for probability outputs (this also matches the thresholding at
# 0.5 done in accuracy() and visualize_sample()).
criterion = nn.BCELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)


# Train the model
train_model(model, train_dataloader, criterion, optimizer, num_epochs=150)


# Save the trained model
model_path = "C:/Users/SONGSHITAO/Desktop/1d_data/saved_model.pth"
save_model(model, model_path)

# Reload the weights (load_model also switches the model to eval mode)
load_model(model, model_path)

# Evaluate and write per-sample visualizations into save_dir
test_model(model, test_dataloader, criterion, save_dir="C:/Users/SONGSHITAO/Desktop/1d_data/visualization_results/")
