import torch
import torch.nn as nn
import math


class PositionalEncoding(nn.Module):
	"""Additive sinusoidal positional encoding (Vaswani et al., 2017).

	Precomputes a [max_len, d_model] table of interleaved sin/cos values and
	adds the first ``seq_len`` rows to the input on every forward pass.
	"""

	def __init__(self, d_model, max_len=5000):
		"""
		Args:
			d_model: embedding dimension (odd values are supported).
			max_len: maximum sequence length covered by the table.
		"""
		super(PositionalEncoding, self).__init__()
		pe = torch.zeros(max_len, d_model)
		position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
		# Geometric frequency progression: 10000^(-2i/d_model) per sin/cos pair.
		div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
		pe[:, 0::2] = torch.sin(position * div_term)
		# For odd d_model there is one fewer cosine column than sine column,
		# so truncate div_term to d_model // 2 entries (the original assignment
		# raised a shape-mismatch error for odd d_model; this is a no-op for
		# even d_model).
		pe[:, 1::2] = torch.cos(position * div_term[:d_model // 2])
		# Buffer: moves with .to(device)/state_dict but is not a trainable parameter.
		self.register_buffer('pe', pe)

	def forward(self, x):
		"""
        Args:
            x: Tensor, shape [batch_size, seq_len, embedding_dim]

        Returns:
            Tensor of the same shape with positional encodings added
            (the [seq_len, d_model] slice broadcasts over the batch dim).
        """
		return x + self.pe[:x.size(1), :]


class PoetryModel(nn.Module):
	"""Decoder-only Transformer language model for poetry generation.

	nn.TransformerDecoder requires a memory tensor for cross-attention;
	since there is no encoder, a single zero vector per batch element is
	supplied as a dummy memory.
	"""

	def __init__(self, vocab_size, d_model=512, nhead=8, num_layers=6, dim_feedforward=2048, max_seq_len=125):
		"""
		Args:
			vocab_size: size of the token vocabulary.
			d_model: embedding / model dimension.
			nhead: number of attention heads per layer.
			num_layers: number of stacked decoder layers.
			dim_feedforward: hidden size of each layer's feed-forward block.
			max_seq_len: longest sequence the positional table supports.
		"""
		super(PoetryModel, self).__init__()
		self.d_model = d_model
		self.embedding = nn.Embedding(vocab_size, d_model)
		self.pos_encoder = PositionalEncoding(d_model, max_seq_len)

		# Transformer decoder stack (batch_first: tensors are [B, T, C]).
		decoder_layer = nn.TransformerDecoderLayer(
			d_model=d_model,
			nhead=nhead,
			dim_feedforward=dim_feedforward,
			dropout=0.1,
			batch_first=True
		)
		self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers)

		# Projects decoder hidden states back to vocabulary logits.
		self.output_layer = nn.Linear(d_model, vocab_size)

		self._init_weights()

	def _init_weights(self):
		# Xavier-initialize every matrix-shaped parameter; biases and other
		# 1-D parameters keep their PyTorch defaults.
		for p in self.parameters():
			if p.dim() > 1:
				nn.init.xavier_uniform_(p)

	def forward(self, x):
		"""
        Args:
            x: LongTensor of token ids, shape [batch_size, seq_len]

        Returns:
            Logits tensor of shape [batch_size, seq_len, vocab_size].
        """
		# Scale embeddings by sqrt(d_model) as in "Attention Is All You Need".
		x = self.embedding(x) * math.sqrt(self.d_model)

		# Add positional encoding.
		x = self.pos_encoder(x)

		# Causal mask: -inf strictly above the diagonal so position i cannot
		# attend to positions > i. Built directly on the input's device
		# (the original built it on CPU and then copied it with .to()).
		seq_len = x.size(1)
		mask = torch.triu(
			torch.full((seq_len, seq_len), float('-inf'), device=x.device),
			diagonal=1
		)

		# Dummy zero memory for the (encoder-less) decoder's cross-attention.
		batch_size = x.size(0)
		memory = torch.zeros(batch_size, 1, self.d_model, device=x.device)

		output = self.transformer_decoder(x, memory, tgt_mask=mask)

		# Project to vocabulary logits.
		logits = self.output_layer(output)

		return logits


def create_model(vocab_size, config):
	"""Build a PoetryModel from a configuration mapping.

	Args:
		vocab_size: size of the token vocabulary.
		config: mapping that must provide the keys 'd_model', 'nhead',
			'num_layers', 'dim_feedforward' and 'max_seq_len'.

	Returns:
		A freshly constructed PoetryModel.
	"""
	return PoetryModel(
		vocab_size=vocab_size,
		d_model=config['d_model'],
		nhead=config['nhead'],
		num_layers=config['num_layers'],
		dim_feedforward=config['dim_feedforward'],
		max_seq_len=config['max_seq_len'],
	)