import preprocess.Data2Feature as D2F
import preprocess.stopwords as Stopwords
import preprocess.url_parser as Parser
import preprocess.gram as Gram
import jieba
import numpy as np
import fasttext
from sklearn.preprocessing import LabelEncoder
   
# Identifier string for this classifier module.
NAME = 'Fasttext'

class Fit:

    # ====================================================================================================================
    # Classifier: Fasttext
    #     For more information on the fasttext algorithm, please refer to https://github.com/salestock/fastText.py.
    #
    # Input: X_train,Y_train,X_test,Y_test,CutMethod
    #
    # Output: predictions of X_test (stored in self.predictions)
    #
    # Note: 1. If CutMethod == 'char':
    #           use each character in titles as features
    #       Elif CutMethod == 'parse':
    #           parse title to each word as features
    #     2. Usually, fasttext only deals with text information.
    #       However, we cut titles and websites and put them together as the feature and the input of fasttext algorithm.
    # =======================
    # Results:
    #
    # CutMethod == 'char':
    #     Fasttext-char: Training Set Accuracy: 0.997551
    #     Fasttext-char: Test Set Accuracy: 0.834362
    # CutMethod == 'parse':
    #     Fasttext-parse: Training Set Accuracy: 0.998887
    #     Fasttext-parse: Test Set Accuracy: 0.836420
    # =====================================================================================================================

    # Paths of the intermediate files fasttext reads and writes.
    FILE_TRAIN = 'Data/fasttext_train.txt'
    FILE_TEST = 'Data/fasttext_test.txt'
    FILE_MODEL = 'Data/fasttext_model'
    N1 = 20  # control the beginning of all-grams of websites (smallest n)
    N2 = 20  # control the end of all-grams of websites (largest n)
    Add_Web = True  # if Add_Web == False: we only use title information

    def __init__(self, X_train, Y_train, X_test, Y_test, CutMethod):
        # Train on the training split and immediately predict the test split.
        self.predictions = self.fit(X_train, Y_train, X_test, Y_test, CutMethod)

    def _write_url_grams(self, file_w, web):
        """Write the de-duplicated letter n-grams of the website tokens to file_w.

        Shared by both cut methods; n ranges over [N1, N2] inclusive.
        """
        tokens = Parser.UrlParser(web).names
        seen = []
        for token in tokens:
            for n in range(self.N1, self.N2 + 1):
                for gram in Gram.letter_n_gram(token, n):
                    if gram not in seen:  # eliminate repeated n-grams in each row [changable]
                        seen.append(gram)
                        file_w.write(gram + ' ')

    def write_to_file(self, file_name, X, Y, cutmethod):

        # ===============================================================================================
        # Extract features from websites and titles, then write to files as the input of fasttext.
        #     Each line starts with __label__ according to the requirement of fasttext,
        #     then follows with features (either features from titles or from websites).
        # As fasttext needs a special input format, we do not use Data2Feature to generate features here.
        # ================================================================================================

        with open(file_name, 'w', encoding="utf-8") as file_w:
            for row, y in zip(X, Y):
                web, title = row[0], row[1]
                file_w.write('__label__' + str(y) + '\t')

                if cutmethod == 'parse':  # cut title as words
                    for seg in jieba.cut(title.strip(), cut_all=True):
                        if len(seg) >= 1 and seg != ' ':  # and Stopwords.justify_stop_words(seg) == False: [changable]
                            file_w.write(seg + ' ')
                    if self.Add_Web:  # add n-grams of websites as features too
                        self._write_url_grams(file_w, web)

                elif cutmethod == 'char':  # cut title as characters
                    for seg in title.strip():
                        if not Stopwords.justify_stop_words(seg):
                            file_w.write(seg + ' ')
                    if self.Add_Web:  # add n-grams of websites as features too
                        self._write_url_grams(file_w, web)

                else:
                    print('cutmethod should be one of \'char\' and \'parse\'')
                file_w.write('\n')

    def generate_files(self, X_train, Y_train, X_test, Y_test, cutmethod='char'):
        """Encode labels to integers and write the train/test feature files.

        Returns the fitted LabelEncoder so predictions can be decoded back
        to the original label strings later.
        """
        le = LabelEncoder()  # transfer labels from string ('Art') to number (1)
        le.fit(np.unique(np.hstack((Y_train, Y_test))))
        Y_train = le.transform(Y_train)
        Y_test = le.transform(Y_test)

        self.write_to_file(self.FILE_TRAIN, X_train, Y_train, cutmethod)
        self.write_to_file(self.FILE_TEST, X_test, Y_test, cutmethod)
        return le  # keep encoder to get original labels

    def predict_testset(self, classifier, le):
        """Predict the label of every row of FILE_TEST and decode via le.

        The feature string of each row is the second tab-separated field,
        i.e. the same file format produced by write_to_file.
        """
        # The file was written as utf-8; read it back the same way to avoid
        # locale-dependent decode errors, and close it deterministically.
        with open(self.FILE_TEST, 'r', encoding="utf-8") as file_r:
            lines = file_r.readlines()

        features = [line.split('\t')[1] for line in lines]

        predictions = classifier.predict(features)  # one [label] list per row
        predictions = np.array(predictions).reshape((len(lines)))
        predictions = le.inverse_transform(predictions.astype(np.int32))  # decode
        return predictions

    def fit(self, X_train, Y_train, X_test, Y_test, CutMethod):
        """Train a fasttext classifier and return predictions for X_test."""

        # extract features from websites and titles and write them to files
        le = self.generate_files(X_train, Y_train, X_test, Y_test, CutMethod)

        # fit classifier (salestock fastText.py API)
        classifier = fasttext.supervised(self.FILE_TRAIN,
                              self.FILE_MODEL,
                              label_prefix='__label__',
                              loss = 'softmax',    # loss function {ns, hs, softmax}
                              lr = 0.6,         # learning rate
                              epoch = 60,       # number of epochs
                              dim = 100,       # size of word vectors
                              silent=1)       # disable the log output from the C++ extension

        # report accuracy on both splits
        result = classifier.test(self.FILE_TRAIN)
        print ('Fasttext-%s: Training Set Accuracy: %f' % (CutMethod,result.precision))
        result = classifier.test(self.FILE_TEST)
        print ('Fasttext-%s: Test Set Accuracy: %f' % (CutMethod,result.precision))

        # get predictions for each data point in the test set
        predictions = self.predict_testset(classifier, le)
        return predictions


