import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
import torch.nn.functional as F



# Define the 1-D convolutional neural network model
class UNet1D(nn.Module):
    """Small 1-D conv encoder/decoder for per-sample binary segmentation.

    The forward pass returns *raw logits* (no sigmoid): this script trains
    with ``nn.BCEWithLogitsLoss``, which applies the sigmoid internally, and
    ``accuracy()`` also sigmoids the outputs itself. The original code
    applied ``torch.sigmoid`` here as well, so the loss and the accuracy saw
    doubly-squashed values and the training gradients were corrupted.
    """

    def __init__(self, in_channels):
        super(UNet1D, self).__init__()

        # Encoder: two 3-wide convs (length-preserving, padding=1), then
        # halve the temporal resolution with max-pooling.
        self.encoder = nn.Sequential(
            nn.Conv1d(in_channels, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv1d(64, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2)
        )

        # Decoder: progressively reduce channels, then upsample 2x with a
        # transposed conv. NOTE(review): with output_padding=1 the restored
        # length is floor(L/2)*2 + 1, which equals L only for odd input
        # lengths — confirm the input signals have odd length.
        self.decoder = nn.Sequential(
            nn.Conv1d(256, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv1d(128, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv1d(64, 8, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.ConvTranspose1d(8, 1, kernel_size=2, stride=2, output_padding=1)
        )

    def forward(self, x):
        """Map (batch, in_channels, L) -> (batch, 1, L') raw logits."""
        encoded = self.encoder(x)
        # Return logits; BCEWithLogitsLoss / accuracy() apply the sigmoid.
        return self.decoder(encoded)




# Define the dataset class
class CustomDataset(Dataset):
    """Pairs 1-D signal files ("<stem>.txt") with peak-index label files
    ("<stem>_max_ids.txt") found in the same directory.

    Each sample is a dict with:
      'input': float32 tensor of shape (1, L) — the signal with a channel dim
      'label': float32 tensor of shape (L,) — 1s in a +/-2 window around each
               labelled peak index, 0s elsewhere
    """

    def __init__(self, root_dir, threshold=1e-6):
        self.root_dir = root_dir
        # Strip the "_max_ids.txt" suffix (12 chars) to recover the stem.
        self.pair_files = [f[:-12] for f in os.listdir(root_dir) if f.endswith('_max_ids.txt')]
        # NOTE(review): threshold is stored but never used here — confirm
        # before removing.
        self.threshold = threshold

    def __len__(self):
        return len(self.pair_files)

    def __getitem__(self, idx):
        txt_filename = self.pair_files[idx] + '.txt'
        label_filename = self.pair_files[idx] + '_max_ids.txt'

        txt_file = os.path.join(self.root_dir, txt_filename)
        label_file = os.path.join(self.root_dir, label_filename)

        # Signal is assumed to be a 1-D array of floats — TODO confirm no
        # multi-column files exist.
        data = np.loadtxt(txt_file).astype(np.float32)
        # Labels may be written in scientific notation; round each to an
        # integer index below.
        labels_sci = np.loadtxt(label_file).astype(np.float32)

        # np.loadtxt returns a 0-d array for a single value; make iterable.
        if labels_sci.ndim == 0:
            labels_sci = np.array([labels_sci])

        label_indices = [int(round(float(label))) for label in labels_sci]

        # Mark a 5-sample window (index +/- 2) around each peak with 1s.
        binary_label = np.zeros_like(data)
        last = len(binary_label) - 1

        for index in label_indices:
            # Clip the window to valid bounds: the original unclipped writes
            # wrapped negative indices to the *end* of the array (Python
            # negative indexing) and raised IndexError near the right edge.
            lo = max(index - 2, 0)
            hi = min(index + 2, last)
            binary_label[lo:hi + 1] = 1

        sample = {
            'input': torch.from_numpy(data).unsqueeze(0),  # add channel dim -> (1, L)
            'label': torch.from_numpy(binary_label)
        }

        return sample




def accuracy(outputs, labels):
    """Element-wise binary accuracy.

    Args:
        outputs: raw logits from the model (any shape).
        labels: binary (0/1) targets, same shape as ``outputs``.

    Returns:
        0-d float tensor: fraction of positions predicted correctly.
    """
    # Threshold the sigmoid probability at 0.5 for hard 0/1 predictions.
    predictions = (torch.sigmoid(outputs) > 0.5).float()

    correct = (predictions == labels).float()
    # Average over *all* elements. The original divided by
    # len(correct) * len(correct[0]), i.e. batch * channel for the (B,1,L)
    # tensors this script produces — ignoring the length dimension, so the
    # reported "accuracy" could exceed 1.0.
    return correct.mean()




def train_model(model, dataloader, criterion, optimizer, num_epochs=10):
    """Run the optimisation loop and print per-epoch average loss/accuracy.

    Args:
        model: the network to train (left in train mode each epoch).
        dataloader: yields dicts with 'input' and 'label' tensors.
        criterion: loss taking (outputs, labels).
        optimizer: optimiser over ``model``'s parameters.
        num_epochs: number of passes over ``dataloader``.
    """
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        running_acc = 0.0

        for sample in dataloader:
            x = sample['input']
            # Add a channel dimension so labels match the model output shape.
            y = sample['label'].unsqueeze(1)

            optimizer.zero_grad()
            preds = model(x)
            loss = criterion(preds, y)
            loss.backward()
            optimizer.step()

            running_acc += accuracy(preds, y).item()
            running_loss += loss.item()

        n_batches = len(dataloader)
        average_loss = running_loss / n_batches
        average_accuracy = running_acc / n_batches

        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {average_loss:.4f}, Accuracy: {average_accuracy:.4f}')





def test_model(model, dataloader, criterion):
    """Evaluate on a held-out dataloader and print average loss/accuracy.

    Runs in eval mode with gradients disabled; does not modify the model.
    """
    model.eval()
    loss_sum = 0.0
    acc_sum = 0.0

    with torch.no_grad():
        for sample in dataloader:
            x = sample['input']
            # Add a channel dimension so labels match the model output shape.
            y = sample['label'].unsqueeze(1)

            preds = model(x)

            loss_sum += criterion(preds, y).item()
            acc_sum += accuracy(preds, y).item()

    n_batches = len(dataloader)
    average_loss = loss_sum / n_batches
    average_accuracy = acc_sum / n_batches

    print(f'Test Loss: {average_loss:.4f}, Test Accuracy: {average_accuracy:.4f}')

def save_model(model, path):
    """Serialise the model's parameters (state dict only, not the full
    module) to ``path``."""
    state = model.state_dict()
    torch.save(state, path)

def load_model(model, path):
    """Restore parameters saved by ``save_model`` into ``model`` and switch
    it to eval mode."""
    state_dict = torch.load(path)
    model.load_state_dict(state_dict)
    model.eval()



##################
# Build the datasets and dataloaders.
# NOTE(review): paths are hard-coded to one machine — consider making them
# configurable.

train_dir = "C:/Users/SONGSHITAO/Desktop/1d_data/data/train/"
train_dataset = CustomDataset(train_dir)
train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True)


test_dir = "C:/Users/SONGSHITAO/Desktop/1d_data/data/test/"
test_dataset = CustomDataset(test_dir)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False)

# Create the model, and define the loss function and optimiser.
# Assuming input data has 1 channel

model = UNet1D(in_channels=1)

# Binary Cross Entropy Loss for binary classification
#criterion = nn.BCELoss()  # plain BCE would need probability outputs from the model

# BCEWithLogitsLoss fuses sigmoid + BCE: it expects *raw logits* from the
# model — make sure the model's forward does not also apply a sigmoid.
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)

# Train the model
train_model(model, train_dataloader, criterion, optimizer, num_epochs=100)

# Save the trained model
model_path = "C:/Users/SONGSHITAO/Desktop/1d_data/saved_model.pth"
save_model(model, model_path)

# Test the model
load_model(model, model_path)
test_model(model, test_dataloader, criterion)


