import os.path
from nltk.corpus import stopwords
import hashlib
import jieba
import re
import sys
from gensim.parsing.preprocessing import remove_stopwords


class Function:
    """Near-duplicate document detection via 128-bit SimHash fingerprints.

    Each document is segmented with jieba, filtered against English/Chinese
    NLTK stop words plus a bundled ``stopwords.txt``, reduced to its
    high-frequency words, and folded into a weighted 128-bit SimHash.
    Pairwise similarity combines Hamming closeness of the fingerprints with
    the Jaccard overlap of the documents' high-frequency word hash sets.
    """

    def __init__(self):
        self.stopwords_ = set(stopwords.words('english'))
        self.sw_ = set(stopwords.words('chinese'))
        self.dischart = []          # pairwise similarity matrix, built incrementally
        self.top = []               # per-document {word: weight} of high-frequency words
        self.hvl = []               # per-document list of MD5 digests of the top words
        self.w_l = []               # document labels (populated by the caller)
        self.contentlist = []       # per-document 128-bit SimHash as a '0'/'1' string
        self.highrelativelist = []  # clusters of highly similar document labels
        self.dualpair = {}          # label-pair -> similarity; sorted list after inidischt
        self.condence = {}          # per-document table of top words / counts / ratios
        filename = 'stopwords.txt'
        # When frozen by PyInstaller, bundled data files live under sys._MEIPASS.
        if getattr(sys, 'frozen', False):
            bp = sys._MEIPASS
        else:
            bp = os.path.abspath(".")
        path_ = os.path.join(bp, filename)
        # FIX: removed the redundant f.close() — the with-statement already
        # closes the file on exit.
        with open(path_, encoding='utf-8') as f:
            self.stopwords = f.read().splitlines()

    def chineseamount(self, word):
        """Return the number of CJK ideographs (U+4E00..U+9FA5) in ``word``."""
        return sum(1 for ch in word if '\u4e00' <= ch <= '\u9fa5')

    def creatop20(self, items, name):
        """Select and weight the leading high-frequency words of one document.

        Parameters
        ----------
        items : list[tuple[str, int]]
            (word, count) pairs sorted by count, descending.
        name : str
            Document label, used as the key into ``self.condence``.

        Returns
        -------
        dict[str, int]
            word -> weight in 2..6, growing with the word's share of the
            distinct-word total.
        """
        l_ = len(items)
        print(f'condense words are {l_}')
        l10 = int(l_ / 20)  # examine at most the top 5% of distinct words
        cc = 0
        top20 = {}
        condencelist = [['words', 'times', 'ratio']]
        for j in range(l10):
            word, count = items[j]
            # Share of this word relative to the number of distinct words.
            ratio = round(float(count) / float(l_), 4)
            # FIX: items is sorted descending, so once the ratio drops below
            # 1% every later item is below it too — break, not continue
            # (continue also risked misaligned condencelist indexing).
            if ratio < 0.01:
                break
            cc += 1
            condencelist.append([word, count, ratio])
            c = self.chineseamount(word)
            # CJK glyphs render roughly 1.5x as wide; pad for aligned output.
            len_ = len(word) - c + c * 1.5
            ss = word + ' ' * int(10 - len_)
            s_ = f'{ss}\t{count:<6}'
            print(s_, end=" ")
            print(ratio)
            # Weight each top word by its frequency share.
            if ratio < 0.03:
                top20[word] = 2
            elif ratio < 0.05:
                top20[word] = 3
            elif ratio < 0.1:
                top20[word] = 4
            elif ratio < 0.15:
                top20[word] = 5
            else:
                top20[word] = 6
        self.condence[name] = condencelist
        # FIX: guard against empty documents (l_ == 0 crashed here before).
        ra = round(cc / l_ * 100, 3) if l_ else 0.0
        print(f'amount of top {ra}% words are {l10}')
        return top20

    def creathashlist(self, top20, hashlist):
        """Fold one document's weighted word hashes into ``hashlist``.

        ``hashlist`` is a 128-slot signed accumulator: for every word, bit k
        of its MD5 digest adds (bit set) or subtracts (bit clear) the word's
        weight at position k — the SimHash accumulation step.  The raw
        digests are also appended to ``self.hvl`` for later overlap counting.
        """
        hvl = []
        for word, weight in top20.items():
            hb = hashlib.md5()
            hb.update(word.encode('utf-8'))
            hashvalue = hb.digest()  # 16 bytes == 128 bits
            hvl.append(hashvalue)
            # FIX: the original used bin(k)[2:], which drops leading zeros,
            # producing fewer than 128 bits and misaligning bit positions
            # between words.  Each byte must contribute exactly 8 bits.
            binary_bytes = ''.join(format(b, '08b') for b in hashvalue)
            for k in range(128):
                if binary_bytes[k] == '1':
                    hashlist[k] += weight
                else:
                    hashlist[k] -= weight
        self.hvl.append(hvl)

    def validculculation(self, par, name):
        """Tokenize ``par``, record its top words, and store its SimHash.

        Parameters
        ----------
        par : str
            Raw document text.
        name : str
            Document label (key into ``self.condence``).
        """
        self.dischart.append([])
        words = jieba.lcut(remove_stopwords(par.lower()))
        # Count every token longer than one character that is in none of the
        # three stop-word collections.
        counts = {}
        for word in words:
            if (len(word) > 1 and word not in self.stopwords_
                    and word not in self.sw_ and word not in self.stopwords):
                counts[word] = counts.get(word, 0) + 1
        items = sorted(counts.items(), key=lambda x: x[1], reverse=True)
        top20 = self.creatop20(items, name)
        self.top.append(top20)
        # 128 signed accumulators, one per fingerprint bit.
        hashlist = [0] * 128
        self.creathashlist(top20, hashlist)
        # Collapse the accumulator signs into the binary fingerprint.
        finalhashbits = ''.join('1' if v > 0 else '0' for v in hashlist)
        self.contentlist.append(finalhashbits)
        print('----------------------------------------------------------------')

    def getra(self, i, j):
        """Return the Hamming similarity in [0, 1] of fingerprints i and j."""
        cot = sum(1 for a, b in zip(self.contentlist[i], self.contentlist[j])
                  if a != b)
        return round((128 - cot) / 128, 4)

    def getco2(self, l1, l2, i, j):
        """Count top-word hash digests shared by documents i and j.

        Scans the digest list of the document with fewer top words (``l1``
        vs ``l2``) and tests membership in the other, so the shared count is
        the intersection size of the two top-word sets (modulo MD5
        collisions).
        """
        # Scan the smaller list to reduce work.
        small, large = (i, j) if l1 < l2 else (j, i)
        co2 = 0
        for digest in self.hvl[small]:
            if digest in self.hvl[large]:
                co2 += 1
        return co2

    def creatrelativepair(self):
        """Score every document pair and fill ``self.dischart``.

        The score multiplies SimHash Hamming similarity by the Jaccard
        overlap of the two documents' top-word hash sets.

        Returns
        -------
        dict[str, float]
            '<label_i>\\t<label_j>' -> similarity score.
        """
        dualp = {}
        conlen = len(self.contentlist)
        for i in range(conlen):
            l1 = len(self.top[i])
            if l1 == 0:
                continue
            for j in range(i + 1, conlen):
                l2 = len(self.top[j])
                if l2 == 0:
                    continue
                ra = self.getra(i, j)
                co2 = self.getco2(l1, l2, i, j)
                # Jaccard: |intersection| / |union| of the top-word sets.
                l12 = l1 + l2 - co2
                ra2 = round(co2 / l12, 4)
                rafi = round(ra2 * ra, 4)
                self.dischart[i][j] = rafi
                pair = self.w_l[i] + '\t' + self.w_l[j]
                dualp[pair] = rafi
        return dualp

    def createhrl(self, wli, wlj):
        """Merge the similar pair (wli, wlj) into ``self.highrelativelist``.

        FIX: the original first tested ``not (wlin and wljn)``, which made
        both merge branches unreachable and opened a new cluster even when
        one label already belonged to the last one.  A new cluster is only
        needed when NEITHER label is present.
        """
        if not self.highrelativelist:
            self.highrelativelist.append([wli, wlj])
            return
        last = self.highrelativelist[-1]
        wlin = wli in last
        wljn = wlj in last
        if not (wlin or wljn):
            self.highrelativelist.append([wli, wlj])
        elif wlin and not wljn:
            last.append(wlj)
        elif wljn and not wlin:
            last.append(wli)

    def highrelative(self):
        """Cluster document labels whose pairwise score exceeds 0.5."""
        for i in range(len(self.dischart)):
            for j in range(i + 1, len(self.dischart[i])):
                if self.dischart[i][j] > 0.5:
                    self.createhrl(self.w_l[i], self.w_l[j])

    def inidischt(self, wl):
        """Finalize ``self.dischart`` into a printable square table.

        Zero-pads every row, computes all pairwise scores, clusters highly
        similar documents, then prepends a header row/column of 1-based
        document positions and sets 1 on the diagonal.

        Parameters
        ----------
        wl : list[str]
            Document labels, parallel to the rows of ``self.dischart``.
        """
        dsctlen = len(self.dischart)
        for row in self.dischart:
            row.extend([0] * dsctlen)
        dualp = self.creatrelativepair()
        # Most similar pairs first.
        self.dualpair = sorted(dualp.items(), key=lambda x: x[1], reverse=True)
        self.highrelative()
        # FIX: the original used wl.index(i) + 1, which returns the FIRST
        # occurrence and mislabels duplicate names; use positions directly.
        self.dischart.insert(0, list(range(1, len(wl) + 1)))
        # Mirror the header row into a leading column.
        for i in range(len(self.dischart)):
            if i == 0:
                self.dischart[0].insert(0, '')
            else:
                self.dischart[i].insert(0, self.dischart[0][i])
        # Every document is fully similar to itself.
        for i in range(1, len(self.dischart)):
            self.dischart[i][i] = 1