import os
import yaml
import torch
import numpy as np
import re

def load_dataset(path):
	"""Load the poem dataset from an .npz archive.

	The archive is expected to hold three entries: 'data' (the encoded
	poems), and pickled 'word2ix' / 'ix2word' vocabulary dicts.

	Returns:
		(word2ix, ix2word, poems) — char->index dict, index->char dict,
		and the poem array.
	"""
	archive = np.load(path, allow_pickle=True)
	word2ix = archive['word2ix'].item()
	ix2word = archive['ix2word'].item()
	return word2ix, ix2word, archive['data']


def save_model(model, optimizer, path):
	"""Persist the model and optimizer state dicts to *path* as one checkpoint."""
	checkpoint = {
		'model_state_dict': model.state_dict(),
		'optimizer_state_dict': optimizer.state_dict(),
	}
	torch.save(checkpoint, path)


def load_model_state(model, optimizer, path):
	"""Restore model and optimizer state in place from a checkpoint file."""
	state = torch.load(path)
	model.load_state_dict(state['model_state_dict'])
	optimizer.load_state_dict(state['optimizer_state_dict'])


def update_training_history(history_file, entry):
	"""Append *entry* to the YAML training-history file, creating it if absent."""
	history = []
	if os.path.exists(history_file):
		with open(history_file, 'r', encoding='utf-8') as fh:
			history = yaml.safe_load(fh) or []
	history.append(entry)
	with open(history_file, 'w', encoding='utf-8') as fh:
		yaml.dump(history, fh, default_flow_style=False)


import re  # 新增导入正则表达式模块
import torch
import numpy as np


def load_dataset(path):
	"""Load the poem corpus and vocabulary mappings from an .npz archive.

	Returns:
		(word2ix, ix2word, poems) — char->index dict, index->char dict,
		and the encoded poem array.
	"""
	archive = np.load(path, allow_pickle=True)
	word2ix = archive['word2ix'].item()
	ix2word = archive['ix2word'].item()
	poems = archive['data']

	# Progress report for interactive use (original Chinese messages kept).
	print(f"数据集加载完成: {len(poems)} 首诗, 词汇表大小: {len(word2ix)}")
	print(f"特殊标记: <START>: {word2ix['<START>']}, <EOP>: {word2ix['<EOP>']}, <PAD>: {word2ix['</s>']}")

	return word2ix, ix2word, poems


def save_model(model, optimizer, path):
	"""Write a combined model/optimizer checkpoint to *path*."""
	state = dict(
		model_state_dict=model.state_dict(),
		optimizer_state_dict=optimizer.state_dict(),
	)
	torch.save(state, path)


def load_model_state(model, optimizer, path, map_location=None):
	"""Restore model and optimizer state in place from a checkpoint file.

	Args:
		model: module whose parameters are overwritten in place.
		optimizer: optimizer whose state is overwritten in place.
		path: checkpoint path (or file object) produced by ``save_model``.
		map_location: optional device remapping forwarded to ``torch.load``
			(e.g. ``'cpu'`` to load a GPU-trained checkpoint on a CPU-only
			host). Defaults to None, preserving the original behavior.
	"""
	checkpoint = torch.load(path, map_location=map_location)
	model.load_state_dict(checkpoint['model_state_dict'])
	optimizer.load_state_dict(checkpoint['optimizer_state_dict'])


def update_training_history(history_file, entry):
	"""Append *entry* to the YAML training-history file.

	Creates the file if it does not exist. If the existing file is empty
	or does not contain a YAML list (e.g. a stray dict or scalar), a fresh
	list is started instead of crashing on ``history.append``.
	"""
	history = []
	if os.path.exists(history_file):
		with open(history_file, 'r', encoding='utf-8') as f:
			loaded = yaml.safe_load(f)
		# Guard: only reuse prior content when it really is a list;
		# the original `or []` only handled the empty-file (None) case.
		if isinstance(loaded, list):
			history = loaded
	history.append(entry)
	with open(history_file, 'w', encoding='utf-8') as f:
		yaml.dump(history, f, default_flow_style=False)


import re


def generate_poem(model, start_words, ix2word, word2ix, max_len=200, temperature=1.0, repetition_penalty=1.2,
				  device='cpu'):
	"""Generate a coherent classical poem, keeping only the first four lines.

	Args:
		model: language model mapping a (1, seq) LongTensor of token ids to
			logits of shape (1, seq, vocab_size).
		start_words: opening characters of the poem; anything other than
			word characters, whitespace and the Chinese punctuation
			「，。？！」 is stripped.
		ix2word: index -> character mapping.
		word2ix: character -> index mapping; must contain '<START>' and '<EOP>'.
		max_len: maximum number of tokens to sample.
		temperature: softmax temperature (>1 flattens, <1 sharpens).
		repetition_penalty: >1 discourages re-sampling generated tokens.
		device: torch device the input tensor is moved to.

	Returns:
		The poem as text with a newline after each punctuation mark,
		truncated to at most four lines.
	"""
	model.eval()

	# Clean the prompt (keep word chars, whitespace and Chinese punctuation).
	start_words = re.sub(r'[^\w\s，。？！]', '', start_words)
	input_ids = [word2ix['<START>']]
	for char in start_words:
		# Out-of-vocabulary characters fall back to the <START> id.
		input_ids.append(word2ix.get(char, word2ix['<START>']))

	generated = []
	punctuation_chars = {'，', '。', '？', '！'}  # line-break punctuation
	punctuation_count = 0

	with torch.no_grad():
		for _ in range(max_len):
			input_tensor = torch.tensor([input_ids], dtype=torch.long).to(device)
			output = model(input_tensor)
			next_token_logits = output[0, -1, :]

			# CTRL-style repetition penalty: each previously generated token
			# is penalized exactly once (the original penalized per
			# occurrence, compounding to penalty**k for a token seen k times).
			for token in set(generated):
				if next_token_logits[token] < 0:
					next_token_logits[token] *= repetition_penalty
				else:
					next_token_logits[token] /= repetition_penalty

			# Temperature-scaled sampling.
			next_token_logits = next_token_logits / temperature
			probs = torch.softmax(next_token_logits, dim=-1)
			next_token = torch.multinomial(probs, num_samples=1).item()

			# Stop on the end-of-poem marker.
			if next_token == word2ix['<EOP>']:
				break

			char = ix2word[next_token]
			generated.append(next_token)
			input_ids.append(next_token)

			# Stop right after the fourth punctuation mark (the original
			# checked this before sampling, wasting one extra forward pass).
			if char in punctuation_chars:
				punctuation_count += 1
				if punctuation_count >= 4:
					break

	# Decode and clean the full text (keep Chinese punctuation).
	poem_text = start_words + ''.join([ix2word[idx] for idx in generated])
	poem_text = re.sub(r'[^\w\s，。？！]', '', poem_text)

	# Break into lines at punctuation; keep only the first four lines.
	formatted_poem = ''
	line_count = 0
	for char in poem_text:
		formatted_poem += char
		if char in punctuation_chars:
			formatted_poem += '\n'
			line_count += 1
			if line_count >= 4:
				break

	return formatted_poem