import os
import tools
import pickle
import numpy as np

# Load the output-file list from disk.
def getList(filename):
    """Load and return the pickled file list stored at *filename*.

    The file is expected to contain a pickled dict mapping document file
    names to relevance scores (see the test data at the bottom of this
    module: ``{"2.txt": 0.94, "1.txt": 0.83}``).

    NOTE(review): ``pickle.load`` on untrusted input is unsafe — confirm
    these list files are always produced locally by this project.
    """
    # `with` guarantees the handle is closed even if unpickling raises;
    # the original left the file open on error (and pre-initialized
    # `filelist` to a dict that was immediately overwritten).
    with open(filename, 'rb') as file:
        filelist = pickle.load(file)
    print(filelist)
    return filelist

# Print a quick preview for every document in the result list.
def quickshot(filelist, search, searchcontext):
    """Load each pickled document in *filelist*, print its context, and
    build one "..."-wrapped digest sentence per document via getdigest.

    filelist      -- iterable of file names under data/data_full1_keywords/
    search        -- accepted for interface compatibility; not used here
    searchcontext -- query words, forwarded to getdigest
    Returns the list of digest sentences.
    """
    titlelist = []
    resultlist = []
    for filename in filelist:
        print("reading... " + filename)
        print(tools.projectpath)
        path = tools.projectpath + '/' + 'data/data_full1_keywords/' + filename
        with open(path, 'rb') as record:
            doc = pickle.load(record)
            # Collect titles as we go (getdigest receives the running list).
            titlelist.append(doc["title"])
            print(doc["context"])
            snippet = "..." + getdigest(doc["context"], titlelist, searchcontext) + "..."
            resultlist.append(snippet)
        print("\n-----------------")
    print(resultlist)
    return resultlist
    # recommend(search)

# Extract the snippet of the article most relevant to the search terms.
# searchcontent holds the query words; titlelist is unused here (kept for
# interface compatibility with quickshot).
def getdigest(content, titlelist, searchcontent):
    """Return a ~100-char excerpt of *content* centred on the region with
    the most query-word hits, or "[图片]" when nothing matches.

    content       -- full article text (may be empty)
    titlelist     -- unused; accepted for backward compatibility
    searchcontent -- iterable of query words to locate in the text
    """
    # Collect every position at which any query word occurs.
    poslist = []
    for word in searchcontent:
        if content != "":
            poslist.extend(indexstr(content, word))

    # Candidate anchors: median, lower and upper quartile of the hit
    # positions; domainnum picks whichever neighbourhood holds the most hits.
    if len(poslist) > 0:
        median = int(np.median(poslist))  # median hit position
        print(f"中位数{median}")
        sortlist = sorted(poslist)
        print(sortlist)
        q1 = int(1 + (float(len(sortlist)) - 1) * 1 / 4)
        q3 = int(1 + (float(len(sortlist)) - 1) * 3 / 4)
        print(q1, q3)
        # BUG FIX: the quartile *values* must be read from the sorted list.
        # The original indexed the unsorted poslist, yielding arbitrary
        # positions whenever words were found out of document order.
        q1 = sortlist[q1 - 1]
        q3 = sortlist[q3 - 1]
        print(f"上四分位数:{q1},下四分位数:{q3}")

        pos = domainnum(sortlist, q1, q3, median, content)

        # Clamp a ±50-char window around the chosen anchor.
        left = (pos - 50) if (pos - 50) > 0 else 0
        right = (pos + 50) if (pos + 50) < len(content) else len(content)
        print(content[left:right + 1], end="")
        # Strip line breaks and tabs so the snippet prints on one line.
        result = content[left:right + 1].replace("\r", "").replace("\n", "").replace("\t", "")
    else:
        result = "[图片]"  # no hits: document is presumably image-only
    print(result)
    return result

# # 相关推荐
# # 对于search做同义词替换，作为相关推荐
# def recommend(search):
#     print(search)
#     searchcontent = synonyms.seg(search)
#     print(searchcontent)
#
#     itemlist = []
#     nolist = []
#     count = 0
#     for i in range(len(searchcontent[1])):
#         if searchcontent[1][i] == 'n' or searchcontent[1][i] == "ns":
#             itemlist.append(searchcontent[0][i])
#             nolist.append(i)
#             count = count + 1
#             if count == 3:
#                 break
#
#
#     worddict = dict()
#     for item in itemlist:
#         wordlist = synonyms.nearby(item)
#         print(wordlist)
#         for i in range(len(wordlist[1])):
#             if wordlist[1][i]<0.8:
#                 worddict[item] = wordlist[0][i]
#                 break
#     #
#     # print(worddict)
#     valuelist = list(worddict.values())
#     print(valuelist)
#     # for i in range(len(worddict)):
#     #     searchcontent[0][nolist[i]] = valuelist[i]
#     #     # print(searchcontent[0][0:no+1 if no+1<=len(searchcontent[0]) else len(searchcontent[0])])
#     #
#     # s = ""
#     # no = nolist[-1]
#     # if searchcontent[1][no] == "v":
#     #     no = no+2
#     # else:
#     #     no = no+1
#     # for each in searchcontent[0][0:no if no<=len(searchcontent[0]) else len(searchcontent[0])]:
#     #     if each != " ":
#     #         s = s + each
#     # print(s)


# 选择部分文章标题作为相关推荐
# 使用synonyms计算检索语句和文章标题的相似度，选取最高的3个输出
# 放弃
# def recommend(titlelist,search):
#     dict1 = dict()
#     for title in titlelist:
#         temp = synonyms.compare(title, search, seg=True)
#         dict1[title] = temp
#         # print(temp)
#     dict1 = sorted(dict1.items(), key = lambda kv:(kv[1], kv[0]),reverse=True)
#     # print(dict1)
#     titlelist2 = [k[0] for k in dict1]
#     # print(titlelist2)
#     if len(titlelist2) < 3:
#         print(titlelist2[:])
#     else:
#         print(titlelist2[0:3])

def domainnum(sortlist, q1, q3, median, content):
    """Pick the anchor position (q1, q3 or median) whose ±20-character
    neighbourhood contains the most keyword hits.

    sortlist -- sorted list of keyword hit positions
    content  -- the article text (only its length is used, for clamping)
    Ties are broken in favour of q1, then q3, then median.
    """
    limit = len(content)

    def hits(center):
        # Count hit positions inside the window [center-20, center+20],
        # clamped to the bounds of the text.
        lo = max(center - 20, 0)
        hi = min(center + 20, limit)
        return sum(1 for p in sortlist if lo <= p <= hi)

    num1 = hits(q1)      # upper quartile neighbourhood
    num2 = hits(q3)      # lower quartile neighbourhood
    num3 = hits(median)  # median neighbourhood

    if num1 >= num2 and num1 >= num3:
        return q1
    if num2 >= num1 and num2 >= num3:
        return q3
    return median

# Find every position where str1 contains the substring str2.
def indexstr(str1, str2):
    """Return a list of all non-overlapping start indices of *str2* in *str1*.

    An empty *str2* yields [] — the original implementation looped forever
    in that case, because ``"" in s`` is always True and the scan position
    never advanced past a zero-length match.
    """
    if not str2:
        return []
    positions = []
    start = str1.find(str2)
    while start != -1:
        positions.append(start)
        # Skip past this match so reported occurrences never overlap.
        start = str1.find(str2, start + len(str2))
    return positions

# 测试
# getList('list')
# quickshot(getList("list"),"search.txt")
# recommend("中办：向重点乡村持续选派驻村第一书记和工作队")
# 测试数据
# list = {"2.txt":0.94, "1.txt":0.83}
# path = 'list'
# f = open(path,'wb')
# pickle.dump(list,f)
# f.close()

# import pickle
# with open("data/data_full1_keywords/588851","rb") as file:
#     context = pickle.load(file)
#     print(context)

# print(tools.projectpath)

# import re
# line="this model server"
# pattern=r"hdr-biz"
# m = re.search(pattern, line)
# print(m)