import re
import torch
import 文本预处理 as text
import matplotlib.pyplot as plt
# with open('fra1.txt', 'r', encoding='utf-8') as f:
#     content = f.read()
#     f.close()
#
# new_content = re.sub(r' ', '', content)
#
# with open('fra1.txt', 'w', encoding='utf-8') as f:
#     f.write(new_content)
#     f.close()


# Parallel corpora: english_list[i] and french_list[i] form one translation pair.
english_list = []
french_list = []
# Each line of fra1.txt is expected to be "english<TAB>french".
with open('fra1.txt', 'r', encoding='utf-8') as f:
    # Iterate the file lazily instead of materializing all lines with readlines();
    # the redundant f.close() is dropped — the `with` block already closes the file.
    for line in f:
        parts = line.strip().split('\t')
        # Skip malformed lines that do not contain exactly one tab separator.
        if len(parts) == 2:
            english_list.append(parts[0].strip())
            french_list.append(parts[1].strip())

# print(english_list[:10])
# print(french_list[:10])

def process_list(input_list):
    """Lowercase and tokenize each string in `input_list`.

    Words are matched as \\b\\w+\\b runs; every punctuation character that is
    neither a word character nor whitespace becomes its own token.

    Returns a list of token lists, one per input string.
    """
    # Compile once and reuse across all elements.
    token_pattern = re.compile(r'\b\w+\b|[^\w\s]')
    return [token_pattern.findall(element.lower()) for element in input_list]

# source = process_list(english_list)
# target = process_list(french_list)
# print(source[:6], target[:6])

#@save
def show_list_len_pair_hist(legend, xlabel, ylabel, xlist, ylist):
    """Plot a paired histogram of sequence lengths for two token-list corpora."""
    lengths = [[len(seq) for seq in xlist], [len(seq) for seq in ylist]]
    _, _, patches = plt.hist(lengths)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Hatch the second series so the two distributions stay visually distinct.
    for bar in patches[1].patches:
        bar.set_hatch('/')
    plt.legend(legend)
    plt.show()

# show_list_len_pair_hist(['source', 'target'], '# tokens per sequence', 'count', source, target);

# src_vocab = text.Vocab(source, min_freq=1, reserved_tokens=['<pad>', '<bos>', '<eos>'])
#print(len(src_vocab))   #11479 16306

def truncate_pad(line, num_steps, padding_token):
    """Force `line` to exactly `num_steps` tokens.

    Shorter sequences are right-padded with `padding_token`; longer ones are
    truncated. A new list is always returned; `line` is not mutated.
    """
    # When the line is already long enough the multiplier is <= 0, so no
    # padding is added and the slice below performs the truncation.
    padded = line + [padding_token] * (num_steps - len(line))
    return padded[:num_steps]

# print(truncate_pad(src_vocab[source[-1]], 85, src_vocab['<pad>']))

# def build_batches(lines, vocab, batch_size):
#     sorted_lines = sorted(lines, key=lambda x:len(x))
#     batches = []
#     for i in range(0, len(sorted_lines), batch_size):
#         batch = sorted_lines[i:i+batch_size]
#         num_step = len(batch[:-1]) + 1
#         batch = [vocab[sentence] + [vocab['<eos>']] for sentence in batch]
#         padded_batch = [truncate_pad(sentence, num_step, vocab['<pad>']) for sentence in batch]
#         batch = torch.tensor(padded_batch)
#         valid_len = (batch != vocab['<pad>']).type(torch.float32).sum(dim=1)
#         batches.append((batch, valid_len))
#     return batches


def build_batches(source, target, source_vocab, target_vocab, batch_size):
    """Pack parallel (source, target) token lists into padded minibatches.

    Pairs are sorted by source length so that each batch groups similarly
    sized sequences, which keeps padding small. Every sequence gets <eos>
    appended, is padded with <pad> to the longest sequence in its batch, and
    its valid length (count of non-<pad> tokens, including <eos>) is recorded.

    Args:
        source: list of source sentences, each a list of tokens.
        target: list of target sentences, parallel to `source`.
        source_vocab, target_vocab: vocab objects mapping a token list to an
            id list and special tokens ('<pad>', '<eos>') to ids.
        batch_size: maximum number of pairs per batch.

    Returns:
        A list of (source_batch, source_valid_len, target_batch,
        target_valid_len) tuples; the batch tensors have shape
        (batch, num_steps) and the valid lengths are float32 tensors.
    """
    # Guard the empty corpus: zip(*[]) below would raise ValueError otherwise.
    if not source:
        return []

    def _fit(ids, num_steps, pad_id):
        # Truncate to num_steps or right-pad with pad_id (truncate_pad contract).
        if len(ids) > num_steps:
            return ids[:num_steps]
        return ids + [pad_id] * (num_steps - len(ids))

    def _tensorize(batch, vocab):
        # Map tokens to ids, append <eos>, pad to the batch maximum, and
        # compute per-sequence valid lengths.
        pad_id = vocab['<pad>']
        id_seqs = [vocab[sentence] + [vocab['<eos>']] for sentence in batch]
        # BUG FIX: size the batch by its *maximum* sequence length. The old
        # code used len(batch[-1]) + 1, which is only the maximum for the
        # source side (sorted by source length); target batches are NOT
        # sorted by their own length, so longer targets were silently
        # truncated and could even lose their <eos> token.
        num_steps = max(len(seq) for seq in id_seqs)
        tensor = torch.tensor([_fit(seq, num_steps, pad_id) for seq in id_seqs])
        valid_len = (tensor != pad_id).type(torch.float32).sum(dim=1)
        return tensor, valid_len

    combined = sorted(zip(source, target), key=lambda pair: len(pair[0]))
    sorted_source, sorted_target = zip(*combined)

    batches = []
    for i in range(0, len(sorted_source), batch_size):
        source_batch, source_valid_len = _tensorize(
            sorted_source[i: i + batch_size], source_vocab)
        target_batch, target_valid_len = _tensorize(
            sorted_target[i: i + batch_size], target_vocab)
        batches.append(
            (source_batch, source_valid_len, target_batch, target_valid_len))
    return batches



def load_data(batch_size):
    """Tokenize both corpora, build the vocabularies, and yield batches.

    Yields (source_batch, source_valid_len, target_batch, target_valid_len)
    tuples. The generator's return value — retrievable by the caller via
    StopIteration.value — is the (src_vocab, tar_vocab) pair.
    """
    source = process_list(english_list)
    target = process_list(french_list)
    specials = ['<pad>', '<bos>', '<eos>']
    src_vocab = text.Vocab(source, min_freq=1, reserved_tokens=specials)
    tar_vocab = text.Vocab(target, min_freq=1, reserved_tokens=specials)
    batches = build_batches(source, target, src_vocab, tar_vocab, batch_size)
    # NOTE(review): only the first two batches are yielded — this looks like a
    # leftover debug limit; confirm before training on the full dataset.
    yield from batches[:2]
    return src_vocab, tar_vocab



if __name__ == "__main__":
    # Device selection: default to CPU, upgrade to GPU 0 only if it proves usable.
    device = torch.device("cpu")
    if torch.cuda.is_available():
        try:
            assert torch.cuda.device_count() > 0, "No CUDA devices available."
            candidate = torch.device("cuda:0")
            # Allocating a small tensor verifies the device actually works.
            torch.tensor([1.0], device=candidate)
            print(f"Using GPU device 0: {torch.cuda.get_device_name(candidate)}")
            device = candidate
        except Exception as e:
            print(f"CUDA device 0 unavailable: {e}, falling back to CPU.")

    batch_size = 32

    # Drive the generator manually with next() so the StopIteration payload
    # (the vocab pair returned by load_data) can be captured.
    gen = load_data(batch_size)
    try:
        while True:
            xb, x_len, yb, y_len = next(gen)
            print('X:', xb)
            print('X的有效长度:', x_len)
            print('Y:', yb)
            print('Y的有效长度:', y_len)
    except StopIteration as e:
        # The generator's `return` value arrives as StopIteration.value.
        src_vocab, tar_vocab = e.value