import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from efficientnet_pytorch import EfficientNet
import torch.nn as nn
import torch
import torchvision
import torch
from PIL import Image
import time, io
from torch.nn import functional as F
from torchvision import transforms
import torch.backends.cudnn as cudnn
import base64, os
import numpy as np
from concurrent.futures import ThreadPoolExecutor, as_completed
import sys
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.externals import joblib

# Global model handle; reassigned per-directory inside load_model() and read
# (buggily) by efficient_feature — after loading it holds the LAST model.
model = None
# Move models/tensors to CUDA when True.
use_gpu = True
# Number of GPUs handed to nn.DataParallel via device_ids=range(num_gpu).
num_gpu = 2


def efficient_feature(net, input_data):
    """Extract a PCA-compressed backbone feature from an EfficientNet.

    Runs ``input_data`` through every top-level submodule of ``net`` except
    the classifier head (``_fc``), then compresses the flattened feature map
    with a 1-component whitened PCA.

    Args:
        net: DataParallel-wrapped EfficientNet (accessed via ``net.module``).
        input_data: batched image tensor already on the model's device
            (assumes batch size 1 — TODO confirm against callers).

    Returns:
        numpy array of shape (1, 1536): one PCA score per feature channel.
    """
    s = input_data
    # BUGFIX: the original read the module-level ``model`` global instead of
    # the ``net`` argument, so every model produced the features of whichever
    # model load_model() stored last. Use the passed-in network.
    for key in net.module._modules.keys():
        if key == '_fc':
            # Skip the classification head; we want convolutional features.
            continue
        if key == '_blocks':
            # _blocks is a ModuleList: apply each MBConv block in order.
            for block in net.module._blocks:
                s = block(s)
        else:
            s = net.module._modules[key](s)
    # 1536 channels for efficientnet-b3: one row per channel, columns are the
    # remaining (spatial) elements.
    sm = s.cpu().detach().numpy().reshape(1536, -1)
    pca = PCA(whiten=True, n_components=1)
    return pca.fit_transform(sm).reshape(1, -1)


def load_model():
    """Load one fine-tuned EfficientNet-b3 checkpoint per weight directory.

    Populates the module-level dicts ``model_integration`` (key -> model)
    and ``feature_class`` (key -> class count), and leaves the last loaded
    network in the ``model`` global.
    """
    global model_integration, feature_class, model

    weight_path = r'/home/ubuntu/data/testdockerfile/weights'
    model_integration = {}
    feature_class = {}

    for entry in os.listdir(weight_path):
        checkpoint_path = os.path.join(weight_path, entry, 'model_best.pth.tar')
        # Last character of the directory name encodes the class count;
        # characters 8:10 form the lookup key — TODO confirm naming scheme.
        n_class = int(entry[-1])
        key = str(entry[8:10])
        feature_class[key] = n_class

        model = EfficientNet.from_pretrained('efficientnet-b3')
        model._fc = nn.Linear(1536, n_class)

        if use_gpu:
            model = nn.DataParallel(model, device_ids=range(num_gpu))
            model.cuda()
            cudnn.benchmark = True

        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['state_dict'])
        model.eval()
        model_integration[key] = model
    print('Loaded pretrained weights for efficientnet-b3')


def feature_tongue(file):
    """Run every loaded model over one image file.

    Args:
        file: path (or file-like object) accepted by PIL.Image.open.

    Returns:
        dict with keys:
            'success': False (legacy flag, never updated here),
            'predictions': list of (1, n_class) softmax arrays, one per model,
            'image_feature': list of (1, 1536) PCA feature arrays, one per model.
    """
    result = {'success': False}

    def _run_one(image, key, net, result):
        # Backbone feature + classifier probabilities for a single model.
        feature = efficient_feature(net, image)
        preds = F.softmax(net(image), dim=1)
        probs = preds[0].cpu().detach().numpy().reshape(1, -1)
        result['predictions'].append(probs)
        result['image_feature'].append(feature)
        return result

    image = Image.open(file)
    image = prepare_image(image, target_size=(448, 448))
    result['predictions'] = list()
    result['image_feature'] = list()
    # max_workers=1 keeps inference sequential (shared GPU state); the
    # with-block guarantees the executor is shut down — the original leaked it.
    with ThreadPoolExecutor(max_workers=1) as pool:
        futures = {
            pool.submit(_run_one, image, key, net, result): net
            for key, net in model_integration.items()
        }
        for future in as_completed(futures):
            result = future.result()

    return result


def pca_fea(tongue_file):
    """Build one combined feature row for a tongue image.

    Concatenates the side-by-side per-model softmax predictions with a
    6-component whitened PCA of the stacked backbone features, printing
    diagnostic shapes along the way.
    """
    results = feature_tongue(tongue_file)
    backbone_features = results['image_feature']
    predictions = results['predictions']

    # All per-model prediction rows placed side by side -> one wide row.
    img_fea = np.column_stack(predictions)

    pca = PCA(whiten=True, n_components=6)
    reduced = pca.fit_transform(np.row_stack(backbone_features)).reshape(1, -1)
    con_fea = np.hstack((img_fea, reduced))
    print(con_fea)

    # save as h5 file.
    print('================= PCA RESULT ==================')
    print('pca_matrix: {}'.format(pca.components_.shape))
    print('pca_mean: {}'.format(pca.mean_.shape))
    print('pca_vars: {}'.format(pca.explained_variance_.shape))
    print('===============================================')
    print(pca.explained_variance_ratio_)
    print(con_fea.shape)
    return con_fea


def index():
    """Walk the tongue-image folders and dump combined features to a text file.

    Processes directories starting with 'source' (their 'top' subfolder) or
    ending with '_top'; only files whose trimmed name embeds a 2018/2019 date
    are featurized. Output: one "<name> <comma-joined 3-dp floats>" line per
    image in /home/ubuntu/data/multi_label/con_fea.txt.
    """
    import random
    pic_path = r'/home/ubuntu/data_tongue'
    # BUGFIX: the output handle was opened and never closed; the with-block
    # guarantees it is flushed and released even if featurization raises.
    with open('/home/ubuntu/data/multi_label/con_fea.txt', 'w') as fil:
        for folder in os.listdir(pic_path):
            if folder.startswith('source') or folder.endswith('_top'):
                if folder.startswith('source'):
                    folder = folder + r'/top'
                list_pics = os.listdir(os.path.join(pic_path, folder))
                random.shuffle(list_pics)
                print(os.path.join(pic_path, folder))
                for count, file in enumerate(list_pics):
                    tongue_file = os.path.join(pic_path, folder, file)
                    # Drop the 8-char prefix and anything after '!' — TODO
                    # confirm this matches the on-disk naming convention.
                    file = file[8:].split('!')[0]
                    if file.split('_')[-1][:4] in ['2018', '2019']:
                        con_fea = [str(format(v, '.3f')) for v in pca_fea(tongue_file)[0]]
                        fil.write(file + ' ' + ','.join(con_fea) + '\n')

def scale_keep_ar_min_fixed(img, fixed_min):
    """Resize so the SHORTER side equals ``fixed_min``, keeping aspect ratio.

    Args:
        img: PIL image.
        fixed_min: target length (pixels) for the shorter side.

    Returns:
        A new bicubically-resized PIL image.
    """
    width, height = img.size
    if width < height:
        new_width = fixed_min
        # Integer floor keeps the original's exact rounding behaviour.
        new_height = fixed_min * height // width
    else:
        new_height = fixed_min
        new_width = fixed_min * width // height
    return img.resize((new_width, new_height), Image.BICUBIC)


def prepare_image(image, target_size):
    """Preprocess a PIL image into a normalized, batched tensor for inference.

    Scales so the short side matches the crop, center-crops to
    ``target_size``, maps channels to [-1, 1] and adds a batch axis.

    Args:
        image: PIL.Image in any mode (converted to RGB).
        target_size: (height, width) of the final crop. The original ignored
            this and hard-coded 448; using it is backward-compatible since
            every caller passes (448, 448).

    Returns:
        Float tensor of shape (1, 3, H, W); on GPU when ``use_gpu`` is set.
    """
    if image.mode != 'RGB':
        image = image.convert("RGB")

    # Resize keeping aspect ratio so the shorter side matches the crop size
    # (the transforms.Lambda wrapper around this call added nothing).
    image = scale_keep_ar_min_fixed(image, min(target_size))
    image = transforms.CenterCrop(target_size)(image)
    image = transforms.ToTensor()(image)

    # Map each channel from [0, 1] to [-1, 1].
    image = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])(image)

    # Add batch_size axis.
    image = image[None]
    # Uses the module-level use_gpu flag; the original shadowed it with a
    # hard-coded local True, which had the same effect but hid the global.
    if use_gpu:
        image = image.cuda()
    # Inference-only tensor: detach() replaces the removed
    # torch.autograd.Variable(..., volatile=True) API.
    return image.detach()

def data_class(threshold):
    """Train/evaluate an RBF-SVM separating users by their question count.

    Every 10th matched record goes to the test split, labelled with fixed
    cut-offs (<4 positive, >11 negative); the remaining records form the
    training split, labelled by the given ``threshold``. A first pass over
    the file counts class sizes so a second pass can cap each class at the
    smaller one (class balancing).

    Returns:
        (precision, recall, f1) for the negative class (-1) on the test set.
        NOTE(review): the printed labels say 'train_*' but the scores are
        computed on the test split.
    """
    data_path = open(r'/home/ubuntu/data/multi_label/data/data_all_unique_photo_r.txt', 'r')
    dict_usre = {}  # user/photo name -> feature vector (list of strings)
    train_data = []
    label = []
    test_data = []
    test_label = []
    num = 0  # running count of records whose name has a feature vector
    # Load precomputed features: each line is "<name> <comma-joined floats>".
    data_p = open(r'/home/ubuntu/data/multi_label/con_fea.txt', 'r')
    for img in data_p.readlines():
        file = img.strip('\n').split(' ')
        name = file[0]
        num_que = file[-1]
        dict_usre[name] = num_que.split(',')
    num_posi = 0
    num_nege = 0
    num_tposi = 0
    num_tnege = 0
    # --- Pass 1: count positives/negatives per split for balancing. ---
    for img in data_path.readlines():
        file = img.strip('\n').split('\t')
        name = file[0][8:]  # drop an 8-char prefix — TODO confirm file format

        num_que = int(file[-1])
        if name in dict_usre.keys():
            num += 1
            if num % 10 == 0:  # every 10th matched record -> test split
                if num_que < 4:
                    label_ = 1
                    num_tposi += 1

                elif num_que > 11:
                    label_ = -1
                    num_tnege += 1
                # NOTE(review): test records with 4 <= num_que <= 11 belong
                # to neither class and are silently dropped.

            else:
                if num_que < threshold:
                    label_ = 1
                    num_posi +=1

                else:
                    label_ = -1
                    num_nege += 1
    # Per-split cap = size of the smaller class.
    mx = min(num_tposi, num_tnege)
    my = min(num_posi, num_nege)

    print(mx,my)
    num_posi = 0
    num_nege = 0
    num_tposi = 0
    num_tnege = 0
    num = 0
    data_path.close()
    # --- Pass 2: re-read the file and collect balanced samples. ---
    data_path = open(r'/home/ubuntu/data/multi_label/data/data_all_unique_photo_r.txt', 'r')
    for img in data_path.readlines():
        file = img.strip('\n').split('\t')
        name = file[0][8:]

        num_que = int(file[-1])

        if name in dict_usre.keys():
            num += 1

            if num % 10 == 0:
                if num_que < 4:
                    label_ = 1
                    num_tposi += 1
                    if num_tposi < mx:  # strict '<' keeps mx-1 per class
                        test_data.append(dict_usre[name])
                        test_label.append(label_)
                elif num_que > 11:
                    label_ = -1
                    num_tnege += 1
                    if num_tnege < mx:
                        test_data.append(dict_usre[name])
                        test_label.append(label_)
            else:
                if num_que < threshold:
                    label_ = 1
                    num_posi +=1
                    if num_posi < my:
                        train_data.append(dict_usre[name])
                        label.append(label_)
                else:
                    label_ = -1
                    num_nege += 1
                    if num_nege < my:
                        train_data.append(dict_usre[name])
                        label.append(label_)

    print(len(train_data))
    print(len(test_data))
    print(num_posi, num_nege)
    print(num_tposi, num_tnege)
    # Count negatives in the test labels for a quick balance check.
    t=0
    for i in test_label:
        if i == -1:
            t += 1
    print(t,len(test_label))
    clf = svm.SVC(C=0.8, kernel='rbf', gamma='auto', decision_function_shape='ovr')
    clf.fit(train_data, label)
    predict_list = clf.predict(test_data)
    # print(clf.decision_function(test_data))
    # print(predict_list)

    # Scores restricted to the negative class (-1).
    from sklearn import metrics
    pre_score = metrics.precision_score(test_label, predict_list, labels=[-1], average='macro')
    rec_score = metrics.recall_score(test_label, predict_list, labels=[-1], average='macro')
    f1_score = metrics.f1_score(test_label, predict_list, labels=[-1], average='macro')

    from sklearn.linear_model import LogisticRegression  # NOTE(review): unused import
    print('train_pre_score:', pre_score)
    print('train_rec_score:', rec_score)
    print('f1_score:', f1_score)
    return pre_score, rec_score, f1_score



def LogRegressionAlgorithm(datas,labels):
    """Fit two-class logistic-regression weights by batch gradient descent.

    Standardizes ``datas`` column-wise, prepends a bias column, then runs
    1000 fixed-step updates of the (K-1) x M weight matrix with a Gaussian
    prior (L2) term of strength 1/100.

    Args:
        datas: (N, M-1) float array of features.
        labels: length-N array; must contain exactly two distinct values.

    Returns:
        W: (K-1, M) learned weight matrix (K=2, so a single row).
    """
    kinds = list(set(labels))  # names of the 2 classes
    means=datas.mean(axis=0) # per-feature means
    stds=datas.std(axis=0) # per-feature standard deviations
    N,M= datas.shape[0],datas.shape[1]+1  # N = sample count, M = parameter-vector dimension (features + bias)
    K=2 # number of classes

    data=np.ones((N,M))
    data[:,1:]=(datas-means)/stds # standardize the raw data; column 0 stays 1 as the bias term

    W=np.zeros((K-1,M))  # parameter matrix
    priorEs=np.array([1.0/N*np.sum(data[labels==kinds[i]],axis=0) for i in range(K-1)]) # empirical (prior) expectation of each feature per class

    liklist=[]
    for it in range(1000):
        lik=0 # current log-likelihood value
        for k in range(K-1): # first part of the likelihood
             lik -= np.sum(np.dot(W[k],data[labels==kinds[k]].transpose()))
        lik +=1.0/N *np.sum(np.log(np.sum(np.exp(np.dot(W,data.transpose())),axis=0)+1)) # second part of the likelihood (log partition)
        liklist.append(lik)

        wx=np.exp(np.dot(W,data.transpose()))
        probs=np.divide(wx,1+np.sum(wx,axis=0).transpose()) # (K-1) x N matrix of posterior probabilities
        posteriorEs=1.0/N*np.dot(probs,data) # expected feature values under the model
        gradients=posteriorEs - priorEs +1.0/100 *W # gradient; the last term is the Gaussian prior, guarding against overfitting
        W -= gradients # fixed-step parameter update (no learning rate)
    print("输出W为：",W)
    return W

def predict_fun(datas,W):
    """Predict class indices (0 or 1) with a learned logistic weight matrix.

    Re-standardizes ``datas`` using its OWN statistics, prepends a bias
    column, and assigns each sample to the class with the larger
    (unnormalized-then-normalized) probability.

    Args:
        datas: (N, M-1) float array of features.
        W: (K-1, M) weight matrix from LogRegressionAlgorithm (K=2).

    Returns:
        int array of length N with the argmax class per sample.
    """
    n_samples = datas.shape[0]
    n_params = datas.shape[1] + 1  # features plus one bias slot
    n_classes = 2

    # Standardize and prepend the bias column (column 0 stays 1).
    design = np.ones((n_samples, n_params))
    design[:, 1:] = (datas - datas.mean(axis=0)) / datas.std(axis=0)

    # probM row i holds the per-class scores for sample i; the last column
    # keeps its initial 1.0 as the reference class.
    probM = np.ones((n_samples, n_classes))
    print("data.shape:", design.shape)
    print("datas.shape:", datas.shape)
    print("W.shape:", W.shape)
    print("probM.shape:", probM.shape)
    probM[:, :-1] = np.exp(np.dot(design, W.transpose()))
    probM /= probM.sum(axis=1, keepdims=True)  # normalize rows to probabilities

    predict = np.argmax(probM, axis=1).astype(int)
    print("输出predict为：", predict)
    return predict


def softmax(x):
    """Numerically stable softmax along axis 0.

    The original comment promised subtracting the per-column max but the
    code omitted it, so large inputs (e.g. logits near 1000) overflowed
    np.exp and produced NaN. Shifting by the max is mathematically
    identical and fixes that.

    Args:
        x: numpy array; normalization runs down axis 0.

    Returns:
        Array of the same shape whose axis-0 slices sum to 1.
    """
    # Shift by the max so the largest exponent is exp(0) = 1.
    shifted = x - np.max(x, axis=0, keepdims=True)
    x_exp = np.exp(shifted)
    x_sum = np.sum(x_exp, axis=0, keepdims=True)
    return x_exp / x_sum


def data_prepare_logstic(file_path,num_class,dict_user,index,threshold):
    """Per-question logistic-regression experiment over one inquiry file.

    For each of the ``num_class`` label columns, builds a class-balanced
    dataset from lines whose user key appears in ``dict_user`` (two passes:
    count, then collect capped at min(pos, neg)), and fits/evaluates it with
    LogRegressionAlgorithm / predict_fun.

    Args:
        file_path: file name inside the inquiry_questions directory.
        num_class: number of label columns in this file.
        dict_user: user key -> feature vector (list of strings).
        index: question-file index (only used by the commented-out code).
        threshold: decision threshold (only used by the commented-out code).

    Returns:
        (classifier_list, f1_score_list, rec_score_list, pre_score_list).
        NOTE(review): the sklearn training/evaluation below is commented
        out, so all four returned lists are currently always empty.
    """
    pre_score_list = []
    rec_score_list = []
    f1_score_list = []
    data_file = open('/home/ubuntu/data/multi_label/test/inquiry/inquiry_questions/'+file_path, 'r').readlines()
    classifier_list = []
    for n_class in  range(num_class):

        datas = []
        labels = []

        # Pass 1: count positives/negatives for this label column.
        pos_num = 0
        neg_num = 0
        for line in data_file:
            # Line format: "'<name>,...', [l0, l1, ...]" — split off the label list.
            linedata = line.strip('[|]|\n').split(', [')
            file = linedata[0].split(',')[0][1:-1]
            if file in dict_user.keys():
                labels_list = [float(i) for i in linedata[-1].split(', ')]
                if labels_list[n_class] == 1.0:
                    pos_num +=1
                else:
                    neg_num +=1
        # Pass 2: collect samples, capping each class at min(pos_num, neg_num).
        label_neg = 0
        label_pos = 0
        for line in data_file:
            linedata = line.strip('[|]|\n').split(', [')
            file = linedata[0].split(',')[0][1:-1]
            if file in dict_user.keys():
                labels_list = [float(i) for i in linedata[-1].split(', ')]
                if labels_list[n_class] == 1:
                    label_pos += 1
                    if label_pos > min(pos_num,neg_num):
                        continue
                else:
                    label_neg += 1
                    if label_neg > min(pos_num, neg_num):
                        continue
                datas.append(dict_user[file])
                labels.append(labels_list[n_class])
        datas=np.array(datas).astype(float)
        labels = np.array(labels)
        #
        #
        # Fit the hand-rolled logistic regression and predict on the
        # TRAINING data (predictions are currently unused).
        weight = LogRegressionAlgorithm(datas, labels)
        predic = predict_fun(datas, weight)

        # from sklearn.linear_model.logistic import LogisticRegression
        #
        # X_train, X_test, y_train, y_test = train_test_split(datas, labels,test_size=0.1)
        # classifier = LogisticRegression(penalty='l2',solver='liblinear')
        # model_class = classifier.fit(X_train, y_train)
        # proba = model_class.predict_proba(X_test)[:, 1]
        # # print(classifier.predict(X_test))
        # # print(classifier.decision_function(X_test))
        # predictions = np.array([1 if i > threshold  else 0 for i in proba])
        # # print(proba)
        # # print(predictions)
        # pre_score = metrics.precision_score(y_test, predictions, labels=[1], average='macro')
        # rec_score = metrics.recall_score(y_test, predictions, labels=[1], average='macro')
        # f1_score = metrics.f1_score(y_test, predictions, labels=[1], average='macro')
        # pre_score_list.append(pre_score)
        # rec_score_list.append(rec_score)
        # f1_score_list.append(f1_score)
        # from sklearn.linear_model import LogisticRegression
        # print('=================================')
        # print(index,n_class)
        # print('train_pre_score:', pre_score)
        # print('train_rec_score:', rec_score)
        # print('f1_score:', f1_score)
        #
        # if f1_score > 0.5:
        #     joblib.dump(model_class, '/home/ubuntu/data/multi_label/test/inquiry/models/'+str(index)+'_'+str(n_class)+'.model')
        #     classifier_list.append(model_class)
    return classifier_list,f1_score_list,rec_score_list,pre_score_list

if __name__ == "__main__":
    # load_model()
    # index()
    # import matplotlib.pyplot as plt
    # thred_list = []
    # rec_score_list = []
    # f1_score_list = []
    # for thred in range(8,13):
    #     pre_score, rec_score, f1_score = data_class(thred)
    #     thred_list.append(thred)
    #     rec_score_list.append(rec_score)
    #     f1_score_list.append(f1_score)
    #
    # plt.plot(thred_list, rec_score_list, 'b')
    # plt.plot(thred_list, f1_score_list, 'r')
    #
    # plt.savefig('/home/ubuntu/data/multi_label/data/e.jpg')

    # Label-column counts per inquiry file, indexed by (file id % 100).
    class_list = [3,2,6,3,5,2,2,4,6,3,6,5,3,9,14,12]
    dict_user = {}
    dict_user_split = {}
    num_split = 0
    # Split the precomputed features: the first ~141k lines feed the
    # experiments, the remainder is held out in dict_user_split.
    # (with-block also fixes the previously leaked file handle.)
    with open(r'/home/ubuntu/data/multi_label/con_fea.txt', 'r') as data_p:
        for img in data_p:
            num_split += 1
            file = img.strip('\n').split(' ')
            name = file[0].split('_')[0]
            num_que = file[-1]
            if num_split < 141000: #141000
                dict_user[name] = num_que.split(',')
            else:
                dict_user_split[name] = num_que.split(',')

    for i in range(1,8):
        # Single source of truth for the sweep threshold. BUGFIX: the plot
        # was previously saved under 0.5 + i*0.01 while the experiment ran
        # with 0.5 + i*0.05, so file names misreported the threshold.
        threshold = 0.5 + i*0.05
        # BUGFIX: reset accumulators each iteration — they previously grew
        # across thresholds, so index_list contained duplicate indices and
        # later plots stacked duplicated curves.
        classifier_list = []
        f1_score_list = []
        rec_score_list = []
        pre_score_list = []
        inde = []
        f1_score_li = []
        rec_score_li = []
        pre_score_li = []
        for file in os.listdir(r'/home/ubuntu/data/multi_label/test/inquiry/inquiry_questions'):
            t = int(file.split('.')[0])%100
            inde.append(t)
            classifier,f1_score,rec_score,pre_score = data_prepare_logstic(file,class_list[t],dict_user,t,threshold)
            classifier_list.append(classifier)
            f1_score_list.append(f1_score)
            rec_score_list.append(rec_score)
            pre_score_list.append(pre_score)
        # Walk the per-file score lists in ascending question-index order.
        index_list = [pos for pos, v in sorted(enumerate(inde), key=lambda x: x[1])]
        for pos in index_list:
            f1_score_li.extend(f1_score_list[pos])
            rec_score_li.extend(rec_score_list[pos])
            pre_score_li.extend(pre_score_list[pos])
        print(len(f1_score_li))
        # BUGFIX: clear the figure so each savefig holds only this
        # iteration's curves instead of all previous ones as well.
        plt.clf()
        plt.plot(range(len(f1_score_li)), f1_score_li, 'b',label='f1')
        plt.plot(range(len(rec_score_li)), rec_score_li, 'r',label='rec')
        plt.plot(range(len(pre_score_li)), pre_score_li, 'g',label='pre')
        plt.savefig('/home/ubuntu/data/multi_label/data/'+ str(threshold)+ '.jpg')

    # load logistic model and predict
    # path = r'/home/ubuntu/data/multi_label/test/inquiry/models'
    #
    #
    # import matplotlib.pyplot as plt
    # score_list = []
    # items_list = []
    # question_path = r'/home/ubuntu/data/multi_label/test/inquiry/inquiry_questions'
    # for file in os.listdir(question_path):
    #     t = int(file.split('.')[0])
    #     data_file = open(os.path.join(question_path,file), 'r').readlines()
    #
    #     for line in data_file:
    #         linedata = line.strip('[|]|\n').split(', [')
    #         file = linedata[0].split(',')[0][1:-1]
    #         if file in dict_user.keys():
    #             labels_list = [float(i) for i in linedata[-1].split(', ')]
    #
    #
    # nu = 0
    # for key,value in dict_user_split.items():
    #     score = 0
    #     nu += 1
    #     for file in os.listdir(path):
    #         classifer = joblib.load(os.path.join(path,file))
    #         result = classifer.predict_proba(np.expand_dims(np.array(value),0).astype(float))[:, 1]
    #         score += result[0]
    #     if score > 42:
    #         print(key, score)
    # print(nu)
