#直接输入中文词典为初始经验 和 记忆遗忘方式批量处理文本文件或处理句子文本进行新词学习 基于中文词语提取文本的特征 用于后续的文本雷同和自动分类
#本代码不是nlp 不是切词 仅仅是基于词语提取特征
#作者24476828@qq 该模块单个句子处理完成2021-5-1
import re, os, ujson, datetime, itertools, math
from collections import Counter
import asyncio
import aredis
#import pandas as pd

# Event loop handle (created at import time) for driving the async redis clients below.
loop=asyncio.get_event_loop()
rt=aredis.StrictRedis(host="localhost", port=6379, db=0, charset='utf8', decode_responses=True)# short-term memory store: dual-char combination keys -> (volume, phase, timestamp)
rdNet=aredis.StrictRedis(host="localhost", port=6380, db=0, charset='utf8', decode_responses=True)# permanent word keys -> [source_flag, mutable related-word dict]
rdIndex=aredis.StrictRedis(host="localhost", port=6381, db=0, charset='utf8', decode_responses=True)# permanent memory: dual-char combination keys -> set of whole words containing them

# Default text encoding name; NOTE(review): methods below take their own `encoding` parameter, this module-level value appears unused in the visible code.
encoding="utf8"
# Fragment separator pattern: whitespace, quote marks, CJK/ASCII punctuation, "..." runs and "--".
regexpstr="[\s\'\"“”`‘’。?？!！：:;；，,—、《》【】\(\)\{\}\<\>\[\]]|\.{3}|--"

class wordsNet(object):
    def __init__(self):
        #self.encoding=encoding
        self.dt=datetime.timedelta(seconds=0, microseconds=100000)#记忆遗忘参数 人类短时记忆2秒 此处给程序设定0.1秒
        self.drmdays=datetime.timedelta(days=1)#每天做梦一次清理记忆
        self.lifevol=10#时间加权积累刺激达到10 转为固化记忆
        
    async def dictDir(self, dictdir, encoding):#初始词典目录 批量处理多个词典txt文件
        path = dictdir
        flist = [f for f in os.listdir(path) if f[-4:] == ".txt"]
        for i in flist:
            with open(os.path.join(dictdir, i), "rt", encoding=encoding) as f:
                while True:
                    lines=f.readlines(1024*1024)
                    if not lines:
                        break
                    for i in lines:
                        await rdNet.set(i.splitlines()[0], ujson.dumps([1, {}]), nx=True)#建立词为k 其他关联词为v的dict 标志1 该条目来自词典
                
    async def genDictIndex(self, mode=0):#从词典生成基元
        if mode:
            await rdIndex.flushall()
        for k in await rdNet.keys("*"):
            await self.addDictIndex(k)
                
    async def addDictIndex(self, t):#从参数t词生成基元
        for j in range(len(t)-1):
            await rdIndex.sadd(t[j:j+2], t)#每个词 切分为双字组合基元 双字组合为k 所有包含该组合的词为v的dict
                
    async def extractTxtFile(self, filename, encoding):#, mode=0, num=0, rate=0.2):
        #mode=0通过文本文件学习新词 句子打乱词语碎片化 不提取特征
        #res=Counter()
        with open(filename, "rt", encoding=encoding) as f:
            while True:
                lines=f.readlines(1024*50)#每次50k
                if not lines:
                    break
                for line in lines:
                    t=line.splitlines()[0]
                    parts=re.split("[。?？!！]", t)#按句子切分
                    for p in parts:
                        await self.extractTxtLine(p, 0)
        
    async def extractTxtLine(self, t, mode=0, nowtime=datetime.datetime.now()):
        #单句文本特征提取 默认取前120个词/占比20%的词为特征 两者取短 mode=0不返回结果只学习 mode=1只提取不打印中间信息 mode=2提取且打印
        data, pText=self.preProcTxtLine(t)#调用单行文本预处理
        await self.dualchar(data, nowtime)#临时记忆双字组合key学习
        okList, tempText=await self.wordsLine(data, pText, nowtime)#调用单句词语处理
        await self.updateRel(okList, nowtime)
        if mode!=0:
            okCounter=Counter(okList)
            otherCounter=Counter(filter(lambda x:len(x)>0,re.split(regexpstr, tempText)))
            if mode==2:
                print(okCounter, otherCounter)
            okCounter.update(otherCounter)
            return okCounter
        return

    def preProcTxtLine(self, p):#单句预处理
        pText=p#单句包含标点符号的内容 是整体句子变量
        plist=re.findall("(?:[^\w\s]|_){3,}", p)#三个连续符号识别为表情
        plist=re.findall("《(.+?)》", p)#成对标点（括号、书名号）中的内容 单独取出为一个片段
        plist.extend(re.findall("\(([^\(\)]+)\)", p))
        plist.extend(re.findall("\[([^\[\]]+)\]", p))
        plist.extend(re.findall("{([^{}]+)}", p))
        plist.extend(re.findall("<([^<>]+)>", p))
        #plist.extend(re.findall("'([^']+)'", p))
        #plist.extend(re.findall("‘(.+?)’", p))
        #plist.extend(re.findall('"([^"]+)"', p))
        #plist.extend(re.findall('“(.+?)”', p))
        plist.extend(re.findall('【(.+?)】', p))
        p=re.sub("(?:[^\w\s]|_){3,}"," ",p)
        p=re.sub("《(.+?)》"," ",p)
        p=re.sub("\(([^\(\)]+)\)"," ",p)
        p=re.sub("\[([^\[\]]+)\]"," ", p)
        p=re.sub("{([^{}]+)}"," ", p)
        p=re.sub("<([^<>]+)>"," ", p)
        #p=re.sub("'([^']+)'"," ", p)
        #p=re.sub("‘(.+?)’"," ", p)
        #p=re.sub('"([^"]+)"'," ", p)
        #p=re.sub('“(.+?)”'," ", p)
        p=re.sub('【(.+?)】'," ", p)
        plist.extend(re.split(regexpstr, p))
        #将剩余内容按某些标点符号切分为片段
        data=[]
        for i in filter(lambda x:len(x)>1, plist):
            for j in range(len(i)-1):
                data.append(i[j:j+2])#整句片段 按双字切分组合 存入一个list
        return (set(data), pText)
    
    async def dualchar(self, dcSet, nowtime):#临时记忆双字组合key学习 可转长期记忆
        for i in dcSet:
            if not await rdIndex.smembers(i):
                if not await rt.set(i, ujson.dumps((1, 2, nowtime.isoformat())), nx=True):
                    vol, p, tStr=ujson.loads(await rt.get(i))
                    tTime=datetime.datetime.fromisoformat(tStr)
                    tu=self.remember(nowtime, (vol, p, tTime))
                    if tu[1]==0:
                        await rdIndex.sadd(i, i)
                        await rt.delete(i)
                    else:
                        await rt.set(i, ujson.dumps((tu[0], tu[1], tu[2].isoformat())))

    async def dcWords(self, newSet, pText, nowtime):#长期记忆新词语学习
        newlist=[]
        for x,y in itertools.product(newSet, newSet):
            if x[0]==y[-1] and y[:-2]+x in pText:
                if await self.newWords(y[:-2]+x, nowtime):
                    newlist.append(y[:-2]+x)
            if y[0]==x[-1] and x[:-2]+y in pText:
                if await self.newWords(x[:-2]+y, nowtime):
                    newlist.append(x[:-2]+y)
            if x+y in pText:
                if await self.newWords(x+y, nowtime):
                    newlist.append(x+y)
            if y+x in pText:
                if await self.newWords(y+x, nowtime):
                    newlist.append(y+x)
        #newlist=list(filter(lambda x:len(x)>2, newlist))
        for n in newlist:
            await rdNet.set(n, ujson.dumps([0, {}]), nx=True)#建立新词 标志0 该条目非词典 来自自学
            for idx in range(len(n)-1):
                await rdIndex.sadd(n[idx:idx+2], n)
                await rt.delete(n)
        return set(newlist)
            
    async def newWords(self, new, nowtime):#从短字组合12 234 寻找长字组合1234为关联词
        if len(new)>2:
            if not await rdNet.get(new):#如组合是短时记忆的
                item=await rt.get(new)
                if item:
                    vol, p, tStr=ujson.loads(item)
                    tu=self.remember(nowtime, (vol, p, datetime.datetime.fromisoformat(tStr)))
                    if vol!=255:
                        await rt.set(new, ujson.dumps((tu[0], tu[1], tu[2].isoformat())))
                else:
                    tu=(1, 2, nowtime)
                if tu[1]==0:#组合是长期记忆的 才用于生成新词
                    return True
                else:
                    return False
            else:
                return False
        else:#组合是永久记忆的 不是新词
            return False
                        
    def remember(self, nowtime, tu):
        vol, p, tTime=tu
        pt=nowtime-tTime
        if vol==255:
            return tu
        if p==2 and pt<self.dt:#如是短时记忆2秒再次命中 延长记忆时间为1分钟 对应机器是0.1秒 和 3秒
            #字组合长并不一定容易识别 还需要理解才能记住
            tu=(2, 60, nowtime)
        elif p==60 and pt<self.dt*30:#如是短时记忆1分钟再次命中 延长记忆按小时衰减
            tu=(3, 0, nowtime)
        elif p==0:
            forget=0.56**math.log(vol, math.e)*(pt/self.dt*1800)**0.06#遗忘曲线 1小时剩余记忆1-0.56*（小时数）^0.06
            #vol积累的次数 0.56**math.log(vol, math.e)衰减系数 时间 衰减量=时间指数*衰减系数
            volfg=int(vol*(1-forget))
            if volfg+1>=self.lifevol:#积累完成 成为永久记忆
                tu=(255, 0, 0)
            else:
                if volfg<=0:#衰减弱化
                    tu=(1, 2, nowtime)
                elif volfg==1:
                    tu=(2, 60, nowtime)
                else:#继续积累 
                    tu=(volfg+1, 0, nowtime)
        return tu
    
    async def wordsLine(self, data, pText, nowtime):
        #单句片段文本词语识别 mode=0不提取只学习 mode=1只提取不打印中间信息 mode=2提取且打印
        #兼容度 共存1/互斥-1 今后优化：词频关联 权重累加
        wordlong, wordrel, wordList=set(), dict(), []
        #dcList=list(filter(async lambda x:await rdIndex.smembers(x) is not None, data))#读data中的长期记忆双字组合
        dcList=[]
        for i in data:#读data中的长期记忆双字组合
            if await rdIndex.smembers(i):
                dcList.append(i)
        newlist=await self.dcWords(dcList, pText, nowtime)#长期记忆新词语学习
        for dc in dcList:
            wordlong|=set(filter(lambda x:x in pText, await rdIndex.smembers(dc)))#识别长期词语 存入一个set
        for i in wordlong:#初始化本句中各词语的不冲突情况：兼容度
            if await rdNet.get(i):
                for j in range(pText.count(i)):
                    wordrel[(i, j)]=0
                source, d=ujson.loads(await rdNet.get(i))#取词语的相关词
                for j in d.keys():
                    if j in pText and d[j][1]==0:#长期记忆的相关词
                        for r in range(pText.count(j)):
                            wordrel[(j, r)]=0
        for i in wordrel.keys():#词语兼容度计算
            txt=pText.replace(i[0], " ", i[1]+1)
            for j in wordrel.keys()-set(i):
                wordrel[i]+=txt.count(j[0])
        valueList=list(set(wordrel.values()))
        valueList.sort(reverse=True)#兼容度排序
        orderList=[]
        for i in valueList:
            li=list(set(len(k[0]) for k,v in filter(lambda x:x[1]==i, wordrel.items())))
            li.sort(reverse=True)
            #兼容性相同时 词语长度优先
            for k in li:
                words=[(m[0],wordrel[m[0]]) for m in filter(lambda x:x[1]==i and len(x[0][0])==k, wordrel.items())]
                orderList.extend(words)
                #完成兼容性、长度排序
        okList=[]
        tempText=pText
        for i in orderList:#按顺序取词
            if i[0][0] in tempText:
                okList.append(i[0][0])
                tempText=tempText.replace(i[0][0]," ", 1)
        return okList, tempText
    
    async def updateRel(self, li, nowtime):
        for i in set(li):
            source, d=ujson.loads(await rdNet.get(i))
            for s in set(li)-set([i]):# 同一句中胜出的长期关键组合之间建立关联 
                if s not in d.keys():#如dict没有关联条目 初次新增
                    d[s]=(1, 2, nowtime.isoformat())
                else:
                    vol, p, tStr=d[s]
                    if vol==255:
                        continue
                    else:
                        tu=self.remember(nowtime, (vol, p, datetime.datetime.fromisoformat(tStr)))#学习关联
                        d[s]=(tu[0], tu[1], tu[2].isoformat())
            await rdNet.set(i, ujson.dumps([source, d]))#更新关联词条目
    
    def decay(self, volfg):
        if volfg==1:
            return (1, 2)
        elif volfg==2:
            return (2, 60)
        else:
            return (volfg, 0)
            
    async def clearRdNet(self):#长期关联词信息清理
        nowtime=datetime.datetime.now()
        for k in await rdNet.keys("*"):
            source, d=ujson.loads(await rdNet.get(k))
            for i in d.keys():
                vol, p, tStr=d[i]
                pt=nowtime-datetime.datetime.fromisoformat(tStr)
                if vol<3:
                    del d[i]
                elif vol==255:
                    continue
                else:
                    forget=0.56**math.log(vol, math.e)*(pt/self.dt*1800)**0.06
                    volfg=int(vol*(1-forget))
                    if volfg>=self.lifevol:
                        d[i]=(255, 0, 0)
                    else:
                        if volfg<=0:#错误历史积累时间加权弱化清理
                            del d[i]
                        else:
                            d[i]=(*self.decay(volfg), nowtime.isoformat())
                await rdNet.set(k, ujson.dumps([source, d]))
            if not len(d):
                await rdNet.delete(k)
                
    async def clearRt(self):#学习错误的纠正 forget
        nowtime=datetime.datetime.now()
        for k in await rt.keys("*"):#临时双字组合信息清理
            vol, p, tStr=ujson.loads(await rt.get(k))
            pt=now-datetime.datetime.fromisoformat(tStr)
            if vol==255:
                await rdIndex.sadd(k, k)
            if vol==255 or vol<3:
                await rt.delete(k)
            else:
                forget=0.56**math.log(vol, math.e)*(pt/self.dt*1800)**0.06
                volfg=int(vol*(1-forget))
                if volfg>=self.lifevol:
                    await rdIndex.sadd(k, k)
                    await rt.delete(k)
                else:
                    if volfg<=0:#错误历史积累时间加权弱化清理
                        await rt.delete(k)
                    else:
                        tu=(*self.decay(volfg), nowtime.isoformat())
                await rt.set(k, ujson.dumps(tu))
            
    async def intoDream(self):
        await self.clearRt()
        await self.clearRdNet()
        await self.genDictIndex(1)
        
    async def wannSlp(self, mode=1):#1：强制睡眠 0：自动入睡醒来
        if mode:
            await self.intoDream()
        else:
            nowtime=datetime.datetime.now()
            total=await rt.dbsize()
            i = 0
            for k in await rt.keys("*"):
                vol, p, tStr=ujson.loads(await rt.get(k))
                if nowtime-datetime.datetime.fromisoformat(tStr)>=self.drmdays:
                    i+=1
            if i/total>=0.2:
                await self.intoDream()