# -*- coding: utf-8 -*-
# Created by Hardy on 15th, Aug 2018

from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

import sys
from pyspark import SparkContext
from pyspark import HiveContext
from pyspark.sql.functions import udf, broadcast
from pyspark.sql.types import StructField, StringType, StructType, ArrayType, MapType, IntegerType, DoubleType, \
    FloatType
import operator
import jieba

# from sklearn.feature_extraction import DictVectorizer
# from sklearn.linear_model import LogisticRegression
import pickle

DEFAULT_CLASS = 'overall'


def class_cal(c):
    """Return the class label c[0] when its score c[1] exceeds 0.3.

    Falls back to the module-level DEFAULT_CLASS ('overall') when the
    score is too low to be trusted.
    """
    label, score = c[0], c[1]
    return label if score > 0.3 else DEFAULT_CLASS


def flatten(list_of_list):
    """Concatenate a list of lists into a single flat list (one level deep)."""
    out = []
    for sub in list_of_list:
        out.extend(sub)
    return out


def get_dict(words):
    """Count occurrences of each word after stripping surrounding whitespace.

    Args:
        words: iterable of strings.

    Returns:
        dict mapping stripped word -> occurrence count.

    Bug fix: the original used ``out[k] += out.get(k, 0) + 1`` which raises
    KeyError on the first occurrence of every key (``+=`` needs the key to
    already exist) and would double-count afterwards; it is now a plain
    read-increment-store.
    """
    counts = {}
    for w in words:
        key = w.strip()
        counts[key] = counts.get(key, 0) + 1
    return counts


def classify(cv, tfidf, lg, data):
    """Predict the single most probable class for a piece of text.

    Args:
        cv: fitted CountVectorizer-like object (has .transform).
        tfidf: fitted TF-IDF transformer (has .transform).
        lg: fitted classifier exposing .classes_ and .predict_proba.
        data: raw text to classify; falsy input short-circuits.

    Returns:
        dict with a single entry {class_name: probability}. Empty/None
        input yields {DEFAULT_CLASS: 1.0}.
    """
    if not data:
        return {DEFAULT_CLASS: 1.0}
    # Tokenize with jieba and join tokens with spaces, as the
    # vectorizer expects whitespace-separated terms.
    tokens = ' '.join(jieba.cut(data))
    features = tfidf.transform(cv.transform([tokens]))
    scored = sorted(zip(lg.classes_, lg.predict_proba(features)[0]),
                    key=lambda pair: pair[1], reverse=True)
    # Keep only the top-1 (slice handles an empty result gracefully).
    return {str(label): float(prob) for label, prob in scored[:1]}


def get_weight(keys, keywords_dict):
    """Look up the weight of each key in keywords_dict, in order.

    Raises KeyError if any key is missing (same as the original lookup).
    """
    return list(map(keywords_dict.__getitem__, keys))


def sort_dict(dic):
    """Return the dict's (key, value) pairs sorted by value, descending."""
    return sorted(dic.items(), key=operator.itemgetter(1), reverse=True)


def get_sorted_key(List):
    """Extract the first element (the key) from each pair in a sorted pair list."""
    return list(map(operator.itemgetter(0), List))


def get_smzdm_class1(lis, bc_dic_value):
    """Map each stripped item to its class id via dict lookup.

    Missing items map to None (dict.get default), preserving positions.
    """
    class_ids = []
    for item in lis:
        class_ids.append(bc_dic_value.get(item.strip()))
    return class_ids


if __name__ == '__main__':
    # Three model paths are expected on the command line; fall back to
    # local development defaults when they are not all supplied.
    # Bug fix: the original guard was `len(sys.argv) < 3` yet the else
    # branch read sys.argv[3], so running with exactly two arguments
    # crashed with IndexError. The guard now requires all three paths.
    if len(sys.argv) < 4:
        cv_file = '/Users/hardy/data/models/cv20_all.pkl'
        tfidf_file = '/Users/hardy/data/models/tfidf20_all.pkl'
        lg_file = '/Users/hardy/data/models/lg20_all.pkl'
    else:
        cv_file = sys.argv[1]
        tfidf_file = sys.argv[2]
        lg_file = sys.argv[3]

    # Load the vectorizer, TF-IDF transformer and classifier. Context
    # managers close the handles (the original leaked them).
    # NOTE(review): pickle.load is unsafe on untrusted files; these are
    # assumed to be trusted, locally produced model artifacts.
    with open(cv_file, 'rb') as f:
        CV = pickle.load(f)
    with open(tfidf_file, 'rb') as f:
        TFIDF = pickle.load(f)
    with open(lg_file, 'rb') as f:
        logreg = pickle.load(f)

    sc = SparkContext(appName="Classify youzan secondary classes ")
    sqlContext = HiveContext(sc)

    # Broadcast the read-only models once so every executor reuses a
    # single copy instead of reshipping them with each task.
    bc_CV = sc.broadcast(CV)
    bc_TFIDF = sc.broadcast(TFIDF)
    bc_logreg = sc.broadcast(logreg)

    # Column UDF: title -> {top_class: probability}.
    udf_predict = udf(lambda title: classify(bc_CV.value, bc_TFIDF.value, bc_logreg.value, title),
                      MapType(StringType(), FloatType()))

    # NOTE(review): this query is currently unused — the DataFrame below
    # reads a pre-built table instead. Kept for reference.
    sql = """
    select
a.*,b.team_name
from
(
    select id as goods_id,kdt_id,alias,title from dw.dwd_ic_goods_d where is_display==1 and is_delete==0  and from_unixtime(unix_timestamp(update_time),"yyyyMMdd") = '${END_DAY}'
) a join (select kdt_id,team_name from dw.dws_team_biz_d where is_lock <> 1 ) b on a.kdt_id = b.kdt_id
    """
    df = sqlContext.sql('select * from dm_ai.goods_category_classify_final')
    df = df.withColumn('class2_new_id', udf_predict('title'))
    print(df.take(100))

    sc.stop()
