from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer

import jieba

from glob import glob
import os
import pickle

import keras
from keras.layers import (
    Input, Conv2D, MaxPool2D, LeakyReLU, BatchNormalization,
    LocallyConnected2D, Flatten, Dense, Dropout, Reshape, ZeroPadding2D,
    UpSampling2D, Conv2DTranspose, Concatenate)
from keras.models import Model
import keras.backend as K

import numpy as np
from PIL import Image

try:
    from models import Classifier
except ImportError:
    from libs.models import Classifier

def formatInput(input_data, chi_sim):
    """Normalize raw text into space-separated jieba tokens.

    Parameters
    ----------
    input_data : str or list of str
        A single document or a list of documents.
    chi_sim : list of str
        Whitelist of characters to keep (newlines are also kept,
        then flattened into spaces).

    Returns
    -------
    list of str
        One cleaned, whitespace-collapsed, segmented string per document.
    """
    data = input_data
    if isinstance(data, str):
        data = [data]

    # Build the whitelist once as a set: O(1) membership per character,
    # instead of rebuilding and scanning `chi_sim + ['\n']` (a list) for
    # every character of every document.
    allowed = set(chi_sim)
    allowed.add('\n')

    def formatText(txt):
        # Drop characters outside the whitelist, flatten newlines to
        # spaces, segment with jieba, then collapse repeated whitespace.
        txt = ''.join(w for w in txt if w in allowed)
        txt = txt.replace('\n', ' ')
        seg = ' '.join(jieba.lcut(txt))
        return ' '.join(seg.split())

    return [formatText(txt) for txt in data]

def cleanData(dirname, txtname, chi_sim):
    """Clean every ``*.txt`` file under *dirname* and write one
    segmented line per input file to *txtname*.

    Parameters
    ----------
    dirname : str
        Directory containing the raw ``*.txt`` corpus files.
    txtname : str
        Output path; overwritten with one cleaned line per input file.
    chi_sim : list of str
        Whitelist of characters to keep.
    """
    paths = glob(os.path.join(dirname, '*.txt'))

    # Hoist the whitelist into a set once: O(1) membership per character
    # instead of scanning `chi_sim + ['\n']` (a list) for every character.
    allowed = set(chi_sim)
    allowed.add('\n')

    with open(txtname, 'w') as F:
        # NOTE(fix): the original wrapped `paths` in tqdm(), but tqdm is
        # never imported in this file, so every call raised NameError.
        for path in paths:
            with open(path) as f:
                txt = f.read()
            txt = ''.join(w for w in txt if w in allowed)
            txt = txt.replace('\n', ' ')
            seg = ' '.join(jieba.lcut(txt))
            seg = ' '.join(seg.split())
            F.write(seg + '\n')

class MNBClassifier:
    """TF-IDF + multinomial naive-Bayes binary text classifier.

    The fitted state is the three-element list
    ``[vectorizer, tfidf_transformer, classifier]`` kept in
    ``self.model``; ``save``/``load`` pickle that list wholesale.
    """

    def __init__(self):
        # No fitted state until train() or load() is called.
        self.model = None

    def train(self, p, n):
        """Fit the pipeline on two corpora: *p* (label 0) and *n* (label 1).

        Each file is expected to contain one pre-segmented document per line.
        """
        def read_lines(path):
            # One document per line, newline stripped.
            with open(path) as fh:
                return [line.replace('\n', '') for line in fh.readlines()]

        txt0 = read_lines(p)
        txt1 = read_lines(n)

        counts = CountVectorizer()
        term_matrix = counts.fit_transform(txt0 + txt1)

        tfidf = TfidfTransformer()
        weighted = tfidf.fit_transform(term_matrix)

        labels = [0] * len(txt0) + [1] * len(txt1)
        nb = MultinomialNB().fit(weighted, labels)

        self.model = [counts, tfidf, nb]

    def predict(self, input_data):
        """Return predicted labels (0/1) for an iterable of documents.

        Raises
        ------
        ValueError
            If the model has not been trained or loaded yet.
        """
        if self.model is None:
            raise ValueError('model is None, need to train or load')
        counts, tfidf, nb = self.model
        return nb.predict(tfidf.transform(counts.transform(input_data)))

    def save(self, filename):
        """Pickle the fitted pipeline to *filename*."""
        with open(filename, 'wb') as f:
            pickle.dump(self.model, f)

    def load(self, filename):
        """Restore a pipeline previously written by save().

        NOTE: pickle.load executes arbitrary code — only load trusted files.
        """
        with open(filename, 'rb') as f:
            self.model = pickle.load(f)


class DeepMNBClassifier(Classifier):
    """CNN classifier built on the project's Classifier base class.

    The network stacks five conv+maxpool stages and merges dense
    projections of the three deepest stages before the softmax head.
    """

    def __init__(self, datasets_path, model_filename, input_shape=(128, 128, 3),
                 clean=False, train=True,
                 batch_size=50, epochs_num=30):
        super().__init__(
                datasets_path, model_filename, input_shape,
                clean, train, batch_size, epochs_num)

        # One label per dataset subdirectory, in deterministic sorted order.
        self.labels = sorted(os.listdir(datasets_path))

    def nnet(self):
        """Build and return the (uncompiled) Keras functional model."""
        def conv_block(tensor, filters):
            # 3x3 same-padding ReLU convolution followed by 2x2 max-pooling.
            out = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
            return MaxPool2D((2, 2))(out)

        input_X = Input(shape=self.input_shape, name='InputX')

        # Five downsampling stages with growing filter counts.
        stage = input_X
        stages = []
        for filters in (1, 4, 8, 16, 32):
            stage = conv_block(stage, filters)
            stages.append(stage)

        # Flatten and project the three deepest stages to 256-d features.
        features = [Dense(256, activation='relu')(Flatten()(s))
                    for s in stages[2:]]

        merged = Concatenate()(features)
        hidden = Dropout(0.15)(Dense(256, activation='relu')(merged))

        output_Y = Dense(self.classes_num, activation='softmax', name='OutputY')(hidden)

        return Model(inputs=input_X, outputs=output_Y)

    def saveModel(self):
        """Persist the trained Keras model to self.model_filename."""
        self.model.save(self.model_filename)
