# -*- coding: utf-8 -*-

import jieba
# import Levenshtein
import difflib
import numpy as np

# jieba.load_userdict("dict.txt")

# Filter words: site-specific noise tokens (marketing terms, brackets, etc.)
# stripped from candidate titles before matching.  For a large vocabulary
# these could be loaded from a file instead of being hard-coded here.
stopwords = ["【", "】", "[", "]", "直邮", "包邮", "保税", "全球购", "包税", "（", "）", "原装", "京东超市", "马尾保税仓", "发货", "进口", "+", "/",
             "-", "亏本", "特价", "现货", "天天特价", "专柜", "代购", "预定", "正品", "旗舰店", "#",
             "转卖", "国内", "柜台", "无盒", "保税仓", "官方", "店铺", "爆款", "、"]
# Unit words ("gram", "milliliter", "approx.") removed from the quantity
# field (index 4) of the query; could likewise be kept in a file if large.
conversions = ["克", "毫升", "约"]


class StrSimilarity():
    """Rank candidate title strings by similarity to a structured query.

    ``word`` is a list of 8 query fields produced by ``main`` (padded with
    ``''``); index 4 holds the quantity field.
    # NOTE(review): field semantics (brand/name/quantity/...) are inferred
    # from usage in Compared — confirm against the caller.
    """

    def __init__(self, word):
        # List of 8 query fields; Compared() assumes indices 0-7 exist.
        self.word = word

    # Compared: count how many query fields occur in each candidate string.
    def Compared(self, str_list):
        """Return ``{candidate: score}`` for candidates with a positive score.

        Each matched field adds 1; each missing field subtracts 100, so in
        practice every field must be present for a candidate to survive.
        Note: ``"".count`` in an empty padding field is always non-zero, so
        padding fields count as matches (keeps the original scoring).
        """
        dict_data = {}
        # Strip unit words from the quantity field.
        # NOTE: mutates the caller's list in place (idempotent on re-runs).
        for cons in conversions:
            self.word[4] = self.word[4].replace(cons, "")
        # Fields used for matching; index 1 is deliberately skipped.
        sarticiple = [self.word[0], self.word[2], self.word[3], self.word[4],
                      self.word[5], self.word[6], self.word[7]]
        for strs in str_list:
            # BUGFIX: accumulate replacements.  The original rebound strs_1
            # from strs on every iteration, so only the LAST stopword was
            # actually removed from the candidate.
            strs_1 = strs
            for sws in stopwords:
                strs_1 = strs_1.replace(sws, " ")
            num = 0
            for sart in sarticiple:
                if strs_1.count(sart) != 0:
                    num += 1
                else:
                    num -= 100
            if num > 0:
                dict_data[strs] = num
        return dict_data

    # NumChecks: reorder the Compared() result by descending score.
    def NumChecks(self, dict_data):
        """Return ``dict_data`` as a new dict ordered by descending score."""
        list_data = sorted(dict_data.items(), key=lambda kv: kv[1], reverse=True)
        json_data = {}
        for key, value in list_data:
            json_data[key] = value
        return json_data

    # MMedian: compute a length-based adjustment value per candidate.
    def MMedian(self, dict_data):
        """Return ``{candidate: adjustment}`` based on length difference.

        The median of two values is their mean, so this measures half the
        gap between the candidate's length and ``len(self.word)`` scaled
        by an empirical factor 0.017.
        """
        median_list = {}
        length = len(self.word)
        for k in dict_data:
            num = np.median([len(k), length])
            # abs(...)*0.017 is 0.0 when the lengths agree, matching the
            # original explicit zero branch.
            median_list[k] = abs(length - num) * 0.017
        return median_list

    # Appear: score candidates with difflib and sort, best match first.
    def Appear(self, dict_data):
        """Return ``[(candidate, ratio), ...]`` sorted by descending ratio.

        NOTE(review): the adjustment values computed by MMedian are never
        used here — only the keys are; confirm whether that is intended.
        """
        # Hoisted out of the loop: the joined query string is invariant.
        word = " ".join(self.word)
        json_data = {}
        for k in dict_data:
            json_data[k] = difflib.SequenceMatcher(None, word, k).quick_ratio()
        return sorted(json_data.items(), key=lambda kv: kv[1], reverse=True)


def main(query_str, str_list):
    """Normalize the query to 8 fields and rank ``str_list`` by similarity.

    Args:
        query_str: list of query fields; truncated or padded to length 8.
        str_list: candidate title strings to rank.

    Returns:
        list of ``(candidate, ratio)`` tuples, most similar first
        (the return value of ``StrSimilarity.Appear``).
    """
    if len(query_str) >= 8:
        query_str = query_str[:8]
    else:
        # Pad with empty fields.  FIX: the original used a list
        # comprehension purely for its append() side effect and mutated
        # the caller's list; build a padded copy instead.
        query_str = query_str + [''] * (8 - len(query_str))

    print(query_str)
    print(str_list)
    ss = StrSimilarity(query_str)
    list_data = ss.Compared(str_list)
    num = ss.NumChecks(list_data)
    mmedian = ss.MMedian(num)
    return ss.Appear(mmedian)


if __name__ == "__main__":
    # BUGFIX: main() takes (query_str, str_list); calling it with no
    # arguments raised TypeError.  Run the sample query documented in
    # main()'s original comment against a small demo candidate list.
    demo_query = ["诗留美屋", "Rosette", "海泥洁面乳洗面奶", "粉色白泥", "120克", "", "", ""]
    demo_titles = [
        "【直邮】诗留美屋Rosette 海泥洁面乳洗面奶 粉色白泥 120克",
        "其他品牌 洁面乳 100克",
    ]
    print(main(demo_query, demo_titles))
