import os
import json
import torch.nn
import logging
from tool import torch_tool, attentions

logging.basicConfig(level=logging.INFO, format='[%(levelname)s %(filename)s %(funcName)s:%(lineno)d] %(message)s')
# Use the module name (not the file path) as the logger name so this logger
# participates in the standard dotted logger hierarchy.
log = logging.getLogger(__name__)


class SequenceClassifierModule(torch.nn.Module):
    """Attention-pool a sequence with a learned query, then classify.

    Takes x ~ (bsz, seq_len, dim) and produces logits ~ (bsz, type_num).
    """

    def __init__(self, type_num, dim=100, head_num=2):
        super().__init__()
        self.dim = dim
        num_queries = 1  # one pooled "rule" vector per example
        self.query = torch.nn.Parameter(torch.rand(1, num_queries, dim), requires_grad=True)
        self.attn = attentions.MultiHeadedAttention(h=head_num, d_model=dim)
        self.line = torch.nn.Linear(dim, type_num)
        torch_tool.init_linear(self.line)

    def forward(self, x):
        """Pool x over its sequence dimension and return class logits ~ (bsz, type_num)."""
        batch = x.size(0)
        # Broadcast the learned query across the batch, attend over the sequence.
        pooled = self.attn(self.query.expand(batch, -1, -1), x, x)  # (batch, 1, dim)
        return self.line(pooled.squeeze(1))  # (batch, type_num)


class ResNetEncoder(torch.nn.Module):
    """Stack of residual fully-connected layers: x = x + relu(Linear(x)).

    Every entry of ``nodes`` must be equal, because the residual addition
    requires each layer to preserve the feature dimension.
    """

    def __init__(self, nodes):
        super().__init__()
        # Raise instead of `assert` so the check is not stripped under `python -O`.
        if any(n != nodes[0] for n in nodes[1:]):
            raise ValueError(f'residual layers need equal widths, got {nodes}')
        self.lines = torch.nn.ModuleList(
            torch.nn.Linear(nodes[i - 1], nodes[i]) for i in range(1, len(nodes))
        )
        # forward() uses layers [0, out_layer); defaults to the full stack.
        self.out_layer = len(nodes) - 1

    def set_layer(self, out_layer, frozen_layer=0):
        """Limit forward() to the first `out_layer` layers and freeze the first `frozen_layer`."""
        self.out_layer = out_layer
        for i in range(frozen_layer):
            torch_tool.frozen(self.lines[i])

    def forward(self, x):
        # x ~ (..., dim); each active layer adds a ReLU residual branch.
        for line in self.lines[:self.out_layer]:
            x = x + torch.relu(line(x))
        return x


class Classifier(torch.nn.Module):
    """Token-id classifier: embedding -> residual encoder -> attention-pooled head."""

    def __init__(self, type_list, vocab_size, emb_dim, line_nodes, head_num):
        super().__init__()
        assert emb_dim == line_nodes[0]
        self.types = type_list
        self.emb = torch.nn.Embedding(vocab_size, emb_dim)
        self.input_encoder = ResNetEncoder(line_nodes)
        head_dim = line_nodes[-1]
        self.classifier = SequenceClassifierModule(len(type_list), head_dim, head_num=head_num)
        torch_tool.init_model_weights(self)

    def set_layer(self, out_layer, frozen_layer=0):
        """Restrict encoder depth; freezing any layers also freezes the embedding."""
        if frozen_layer > 0:
            torch_tool.frozen(self.emb)
        self.input_encoder.set_layer(out_layer, frozen_layer=frozen_layer)

    def forward(self, input_ids):
        hidden = self.input_encoder(self.emb(input_ids))
        logits = self.classifier(hidden)  # (bsz, type_num) — unnormalized scores
        indices = logits.argmax(dim=1).tolist()
        return {
            'pred_logit': logits,
            'pred_types': [self.types[i] for i in indices],
            'pred_idx': indices,
        }


def create_model(config, model_path=None):
    """Build a Classifier from the dataset info file, optionally load weights,
    and move the model to the available compute device (GPU/CPU).

    :param config: object with data_dir, vocab_size, emb_dim, line_nodes,
        classifier_head_num attributes.
    :param model_path: optional path to a saved state; missing files are
        logged as a warning, not an error.
    """
    info_path = os.path.join(config.data_dir, 'data_info.json')
    # Use a context manager so the file handle is closed (original leaked it).
    with open(info_path, encoding='utf-8') as f:
        data_info = json.load(f)
    model = Classifier(data_info['type_list'], config.vocab_size, config.emb_dim,
                       line_nodes=config.line_nodes, head_num=config.classifier_head_num)
    if model_path:
        if os.path.isfile(model_path):
            torch_tool.log.info(f'载入模型文件 {model_path}')
            model = torch_tool.load_cpu_model(model, model_path)
        else:
            torch_tool.log.warning(f'模型文件不存在 {model_path}')
    return torch_tool.cuda(model)


def test_model():
    """Smoke test: build a tiny Classifier and push one dummy batch through it."""
    types = ['0', '1']
    nodes = [50] * 7  # uniform widths, as ResNetEncoder requires
    model = Classifier(types, 32000, 50, nodes, 2)
    log.info(model)
    batch = torch.zeros(1, 40).long()  # (bsz=1, seq_len=40) of token id 0
    log.info(model(batch))


# Run the smoke test when this module is executed as a script.
if __name__ == '__main__':
    test_model()
