import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import re
import json


def read_json(file_path):
    """Load and return the JSON document stored at *file_path* (read as UTF-8)."""
    with open(file_path, mode='r', encoding='utf-8') as handle:
        return json.load(handle)

def get_root_dir(path):
    """Return the prefix of *path* up to and including its 'featurelib' component.

    Raises ValueError if 'featurelib' is not a path component.
    """
    parts = path.split(os.path.sep)
    depth = parts.index("featurelib") + 1
    return os.path.sep.join(parts[:depth])

class Tokenizer():
    """Regex tokenizer that splits text on whitespace, digits and punctuation.

    '-' and '_' sit in a capturing group of the split pattern, so re.split
    keeps them as standalone tokens (the other delimiters are dropped; the
    non-participating group yields None entries, which are filtered out).
    """

    def __init__(self, segmenter=None):
        # Optional sub-word segmenter exposing .split(str) -> list[str];
        # only required by viterbi_tokenize(), tokenize() works without it.
        self.segmenter = segmenter
        self.pattern = re.compile(r'[\n\r\t]|\d|[\!\"\#\$\%\&\\\'\(\)\*\+\,\.\/\:\;\<\=\>\?\@\[\\\\\]\^\`\{\|\}\~\，\。\？\！\：\、\《\》\ ]|([\-\_])')

    def contains_arabic(self, text):
        """Return True if *text* contains at least one Arabic-block character."""
        return re.search(r'[\u0600-\u06FF]+', text) is not None

    def viterbi_tokenize(self, sentence):
        """Tokenize *sentence*, passing plain tokens through the segmenter.

        Tokens that are '-'/'_' or contain Arabic characters are kept verbatim;
        everything else is further split by self.segmenter.
        """
        token_list = []
        for token in re.split(self.pattern, sentence):
            if not token:
                continue
            # NOTE: the original tested membership in '\-\_' (an invalid escape
            # producing a literal backslash); captured tokens are only '-'/'_',
            # so '-_' is the intended, equivalent check.
            if token in '-_' or self.contains_arabic(token):
                token_list.append(token)
            else:
                token_list.extend(self.segmenter.split(token))
        return token_list

    def tokenize(self, sentence):
        """Return the non-empty tokens of *sentence* (no sub-word segmentation)."""
        return [token for token in re.split(self.pattern, sentence) if token]

class TextCNN(nn.Module):
    """TextCNN classifier: embedding -> parallel Conv2d branches -> max-pool -> FC.

    Args:
        vocab: mapping (or sized container) of the vocabulary; only len(vocab)
            is used for the embedding size.
        num_classes: number of output classes.
        embedding_dim: embedding vector size per token.
        drop_out: dropout probability applied before the final linear layer.
        num_filters: number of filters per convolution branch.
        filter_sizes: kernel heights (n-gram widths) of the parallel branches.
            A tuple default replaces the original mutable list default.
    """

    def __init__(self,
                 vocab,
                 num_classes,
                 embedding_dim=300,
                 drop_out=0.6,
                 num_filters=256,
                 filter_sizes=(2, 3, 4)):
        super(TextCNN, self).__init__()
        # NOTE(review): padding_idx=len(vocab)-2 assumes '<PAD>' is the
        # second-to-last vocabulary entry — confirm against word2id.json.
        self.embedding = nn.Embedding(len(vocab), embedding_dim, padding_idx=len(vocab) - 2)
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, num_filters, (k, embedding_dim)) for k in filter_sizes])
        self.dropout = nn.Dropout(drop_out)
        self.fc = nn.Linear(num_filters * len(filter_sizes), num_classes)

    def conv_and_pool(self, x, conv):
        """Apply one conv branch + ReLU, then global max-pool over the time axis."""
        x = F.relu(conv(x)).squeeze(3)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x

    def forward(self, x):
        """Map token-id batch (batch, seq_len) to logits (batch, num_classes)."""
        out = self.embedding(x)
        out = out.unsqueeze(1)  # add channel dim for Conv2d: (batch, 1, seq, emb)
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)
        return out


class SmsEnCate0V1ClfModel(object):
    """Inference wrapper for the English SMS cate0 v1 TextCNN classifier.

    Loads the vocabulary and a trained TextCNN checkpoint from the
    sms_en_cate0_v1 model_conf directory and exposes batch prediction.
    """

    def __init__(self):
        self.ROOT_DIR = get_root_dir(os.path.abspath("."))
        self.CONF_DIR = os.path.join(
            self.ROOT_DIR, "feature_conf", "sms", "un", "sms_en_cate0_v1"
        )
        self.MODEL_CONF_DIR = os.path.join(self.CONF_DIR, "model_conf")
        # Fixed seeds + deterministic cuDNN for reproducible inference.
        np.random.seed(42)
        torch.manual_seed(42)
        torch.cuda.manual_seed_all(42)
        torch.backends.cudnn.deterministic = True
        # torch.set_num_threads(16)
        self.tokenizer = Tokenizer()
        self.word2id = read_json(os.path.join(self.MODEL_CONF_DIR, 'word2id.json'))
        self.model = TextCNN(vocab=self.word2id, num_classes=27)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model.load_state_dict(torch.load(os.path.join(self.MODEL_CONF_DIR, 'TextCNN.pt'), map_location=self.device.type))
        self.model.eval()

    def load_dataset(self, msg_list, pad_size=32):
        """Tokenize each message and map it to a fixed-length list of token ids.

        Messages are padded with '<PAD>' (or truncated) to *pad_size* tokens;
        out-of-vocabulary tokens fall back to the '<UNK>' id.
        """
        words_line_list = []
        for msg in msg_list:
            content = msg.strip()
            words_line = []
            token = self.tokenizer.tokenize(content)
            if pad_size:
                if len(token) < pad_size:
                    token.extend(['<PAD>'] * (pad_size - len(token)))
                else:
                    token = token[:pad_size]
            # word to id
            for word in token:
                words_line.append(self.word2id.get(word, self.word2id.get('<UNK>')))
            words_line_list.append(words_line)
        return words_line_list

    def predict(self, text_list):
        """Classify *text_list*; return (predicted class ids, per-class probabilities)."""
        inputs = self.load_dataset(text_list)
        with torch.no_grad():
            batch = torch.LongTensor(inputs).to(self.device)
            outputs = self.model(batch)
            # torch.max over dim 1 -> (values, indices); indices are class ids.
            predict_re = torch.max(outputs, 1)[1].cpu().numpy()
            # Fix: softmax over the class dimension must be explicit — calling
            # F.softmax without `dim` is deprecated and ambiguous.
            probs = F.softmax(outputs, dim=1).tolist()
        return predict_re, probs