import nltk
import nltk.data
from gmssl.sm4 import CryptSM4, SM4_ENCRYPT, SM4_DECRYPT
from gmssl import sm4
import random
import hashlib
import hmac
from secrets import randbits
from MMMM.sql import file_manage

# Large modulus shared by ModReverse/get_Ir/get_I for the masking arithmetic.
# NOTE(review): presumably prime (inverses mod p must exist) — not verified here.
p = 148300296503541537023967543385571281566188269121696986355794642903163978311488366178620508621203382790644644256351530344017086776119734526929467544594283880389370982620148075975876610977802799762624795305966277480303398775064011730140292446255046328316204433780624821532022148316432675132662755400581378870583

def hex2dec(string_num):
    """Convert a hexadecimal string to an int.

    int(s, 16) accepts both upper- and lower-case digits, so the
    original's .upper() normalization was redundant.
    """
    return int(string_num, 16)

# 获取文件中全部不同的单词
# Collect every distinct word in the corpus file
def get_unique_word(file_obj):
    """Return a sorted list of the distinct lower-cased words in the corpus.

    Tokens are split on whitespace and stripped of the punctuation
    '.,!;()[]' at both ends.

    Fixes: the file handle is now closed (context manager), the O(n^2)
    membership loop is replaced by sorted(set(...)), and the unreachable
    print() after the return is removed.

    NOTE(review): `file_obj` is ignored — the path is hard-coded, matching
    the original behavior; confirm whether the parameter should be used.
    """
    with open('D:/Dtest/bysms/MMMM/test.txt', 'r') as text_file:
        text = text_file.read()

    # cleaning: lower-case, whitespace split, strip edge punctuation
    words = [word.strip('.,!;()[]') for word in text.lower().split()]

    # set() deduplicates; sorted() restores the ascending order the
    # original produced with unique.sort()
    return sorted(set(words))


def exgcd(a, b):
    """Extended Euclidean algorithm.

    Returns (x, y, g) such that a*x + b*y == g == gcd(a, b).
    """
    # Iterative formulation; produces the same Bezout coefficients as the
    # recursive version it replaces.
    old_x, x = 1, 0
    old_y, y = 0, 1
    while b != 0:
        q, r = divmod(a, b)
        a, b = b, r
        old_x, x = x, old_x - q * x
        old_y, y = y, old_y - q * y
    return old_x, old_y, a

# 扩展欧几里得求逆元
def ModReverse(a,p):
    x, y, q = exgcd(a,p)
    if q != 1:
        raise Exception("No solution.")
    else:
        return (x + p) % p #防止负数


# 这个是将文本分成不同的句子
from nltk import tokenize
def get_sentense(file_obj):
    """Split the corpus file into sentences with NLTK's punkt tokenizer.

    Fix: the file handle is now closed via a context manager (the
    original leaked it).

    NOTE(review): `file_obj` is ignored — the path is hard-coded, matching
    the original behavior; confirm whether the parameter should be used.
    """
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    with open("D:/Dtest/bysms/MMMM/test.txt") as fp:
        data = fp.read()
    return tokenizer.tokenize(data)


def sm4_gen_key(bits):
    """Generate a `bits`-character SM4 key string over [0-9a-z].

    Security fix: the original used random.sample, which (a) is not a
    cryptographically secure RNG, (b) samples WITHOUT replacement so no
    character can repeat (shrinking the keyspace), and (c) raises for
    bits > 36. This draws with replacement from os.urandom-backed
    random.SystemRandom instead.

    The result must still be encode()d to bytes before use with SM4.
    """
    num_set = [chr(i) for i in range(48, 58)]     # '0'-'9'
    char_set = [chr(i) for i in range(97, 123)]   # 'a'-'z'
    total_set = num_set + char_set
    rng = random.SystemRandom()
    return "".join(rng.choice(total_set) for _ in range(bits))


def iv_gen(bits):
    """Generate a `bits`-character IV string over [0-9a-z].

    Security fix (same as sm4_gen_key): random.sample is not a CSPRNG,
    cannot repeat characters, and fails for bits > 36; draw with
    replacement from random.SystemRandom instead.
    """
    num_set = [chr(i) for i in range(48, 58)]     # '0'-'9'
    char_set = [chr(i) for i in range(97, 123)]   # 'a'-'z'
    total_set = num_set + char_set
    rng = random.SystemRandom()
    return "".join(rng.choice(total_set) for _ in range(bits))

def sm4_cbc_enc(plaintext, iv, key):
    """SM4-CBC encrypt `plaintext` (bytes) under `key`/`iv`; returns bytes."""
    cipher = CryptSM4()
    cipher.set_key(key, SM4_ENCRYPT)
    return cipher.crypt_cbc(iv, plaintext)


def sm4_cbc_dec(ciphertext, iv, key):
    """SM4-CBC decrypt `ciphertext` (bytes) under `key`/`iv`; returns bytes."""
    cipher = CryptSM4()
    cipher.set_key(key, SM4_DECRYPT)
    return cipher.crypt_cbc(iv, ciphertext)



def get_Ir(sha_256_key, unique_word_set=None):
    """Build the row index Ir.

    Position 0 is a placeholder; position i (i >= 1) holds the modular
    inverse mod p of HMAC-SHA256(sha_256_key, unique_word_set[i-1]).

    Fixes: mutable default argument [] replaced with a None sentinel, the
    manual while-loop replaced by a comprehension, and the redundant
    .upper() before int(..., 16) removed (it accepts lowercase hex).
    """
    if unique_word_set is None:
        unique_word_set = []
    return [0] + [
        ModReverse(
            int(hmac.new(sha_256_key, word.encode('utf-8'),
                         digestmod=hashlib.sha256).hexdigest(), 16),
            p,
        )
        for word in unique_word_set
    ]



def get_Ic(sm4_key, sm4_iv, sentense_set=None):
    """Build the column index Ic.

    Position 0 is a placeholder; position i (i >= 1) holds the SM4-CBC
    ciphertext of sentence i-1.

    Fixes: mutable default argument [] replaced with a None sentinel,
    removed the unused `SM4 = sm4.CryptSM4()` instance the original
    created and never touched, and replaced the while-loop with a
    comprehension.

    NOTE(review): sentence ids are still not embedded (original comment
    flagged this as missing).
    """
    if sentense_set is None:
        sentense_set = []
    return [0] + [
        sm4_cbc_enc(sentence.encode('utf-8'), sm4_iv, sm4_key)
        for sentence in sentense_set
    ]


def get_frequency(text):
    """Return a {word: count} map for `text`.

    Lower-cases, splits on whitespace, and strips the punctuation
    '.,!;()[]' from both ends of each token before counting.

    Idiom fix: dict.get replaces the manual membership if/else.
    """
    frequency_word = {}
    for token in text.lower().split():
        word = token.strip('.,!;()[]')
        frequency_word[word] = frequency_word.get(word, 0) + 1
    return frequency_word


def get_I(prime, ks, k_msk, sentense_set=None, unique_word_set=None, Ic=None, Ir=None):
    """Assemble the secure index matrix I, sized len(Ic) x len(Ir).

    Layout:
      I[0][w] (w >= 1)  -- Ir[w], the row header for unique word w-1.
      I[s][0] (s >= 1)  -- Ic[s], the column header for sentence s-1.
      I[s+1][w+1]       -- list of masked values, one per occurrence of
                           unique_word_set[w] in sentense_set[s].

    For each sentence a fresh 256-bit random seed r starts a hash chain;
    each later r is HMAC-SHA256(ks, str(previous r)). Every stored value
    is mask + r, where mask = HMAC-SHA256(k_msk, word) mod prime.

    Fixes: the four mutable default arguments ([]) are replaced with None
    sentinels; the duplicated j==0/else branch bodies are merged. The
    construction logic is otherwise unchanged.

    NOTE(review): random.getrandbits is not a CSPRNG — consider
    secrets.randbits for the chain seed; left as-is to preserve behavior.
    """
    if sentense_set is None:
        sentense_set = []
    if unique_word_set is None:
        unique_word_set = []
    if Ic is None:
        Ic = []
    if Ir is None:
        Ir = []

    # len(Ic) x len(Ir) grid of (initially) empty occurrence lists
    I = []
    for i in range(0, len(Ic)):
        tmp = []
        for j in range(0, len(Ir)):
            tmp.append([])
        I.append(tmp)

    # first row and first column carry the Ir/Ic headers
    for i in range(1, len(Ir)):
        I[0][i] = Ir[i]
    for j in range(1, len(Ic)):
        I[j][0] = Ic[j]

    for i in range(0, len(sentense_set)):
        # hash_chain retains every r so the next link can be derived
        hash_chain = []
        text = sentense_set[i]
        text = text.lower()
        words = text.split()
        words = [word.strip('.,!;()[]') for word in words]

        random_1 = random.getrandbits(256)
        for j in range(0, len(words)):
            if j == 0:
                r = random_1  # chain seed for this sentence
            else:
                r = hmac.new(ks, str(hash_chain[j - 1]).encode('utf-8'),
                             digestmod=hashlib.sha256).hexdigest()
                r = int(r.upper(), 16)
            mask = hmac.new(k_msk, words[j].encode('utf-8'),
                            digestmod=hashlib.sha256).hexdigest()
            mask = int(mask.upper(), 16) % prime

            I[i + 1][unique_word_set.index(words[j]) + 1].append(mask + r)
            hash_chain.append(r)
    return I

# 获取最高频次
def get_f(sentense_set):
    set = []
    for i in range(0,len(sentense_set)):
        dict = get_frequency(sentense_set[i])
        a_sort_list = sorted(dict.items(),key=lambda x:x[1],reverse=True)
        set.append(a_sort_list)

    set_2 = []

    for i in range(0,len(set)):
        set_2.append(set[i][0])
    a_sort_list_2 = sorted(set_2,key=lambda x:x[1],reverse=True)
    return a_sort_list_2[0][1]

# 获取抵御频次攻击的矩阵
def expand(f,I=[],sentense=[],unique_word=[]):
    for i in range(1,len(sentense)+1):
        for j in range(1,len(unique_word)):
            if len(I[i][j]) != f:
                for k in range(0,f-len(I[i][j])):
                    I[i][j].append(randbits(100))


    return I


def add_data(file_obj):
    """Append the uploaded file's text to the corpus file.

    The text is normalized into a single sentence: internal '.' become
    ',', any trailing ',' is dropped, one terminal '.' and a leading
    space are added, then the result is appended.

    Fix: the file is opened with a context manager so the handle is
    closed and the write flushed (the original leaked the handle).
    """
    text = file_manage(file_obj)
    text = text.replace('.', ',')
    text = text.rstrip(',')
    text = text + '.'
    text = ' ' + text
    with open('D:/Dtest/bysms/MMMM/test.txt', mode='a+') as file:
        file.write(text)

# Disabled end-to-end demo of the index pipeline (build Ir/Ic, assemble I,
# then pad with expand); kept as a module-level string for reference.
'''
unique_words = get_unique_word('test.txt')
sentenses = get_sentense('test.txt')
prime = 148300296503541537023967543385571281566188269121696986355794642903163978311488366178620508621203382790644644256351530344017086776119734526929467544594283880389370982620148075975876610977802799762624795305966277480303398775064011730140292446255046328316204433780624821532022148316432675132662755400581378870583
key_s = b'\x1a\xde\x9b\xda\xadNq\xf4\xa5\x04g\x02\x99E\x8d\xcf[\x0f\xa6Dx\xe7\t\xbb\x1e\xb2n\xbd|h\xcf\x16'
key_m_1 = b'\xba\x8a\xed\x17y\x8f2[y]{\xd7\x1a\xb8\x05\x08\xf2CL\xe8\xdaY\xa2\xd0o=\x87\xc5De\xb1\x95'
key_m_2= '8bjp6lyrgfn370i5'
key_1 = b'\xaa%\x00"\x08)+\x84\xf1I\x0e\xeb\xb6\xb2\xd4L\x8eBd\xbc{\xc1\xc7\xc9\xd3`\xb1\xfb\xbbZ\xa2\xed'
iv = 'gvmr10z4pc6xqn3l'
iv_bytes = bytes(iv,encoding='utf8')
Ir = get_Ir(key_m_1,unique_words)
Ic = get_Ic(key_m_2.encode('utf-8'),iv_bytes,sentenses)
I = get_I(prime,key_s,key_1,sentenses,unique_words,Ic,Ir)
print(I)

s1 = get_f(sentenses)

print("-----------初始矩阵-----------")
for i in I:
    print(i)


I_2 = expand(s1,I,sentenses,unique_words)

print("-----------扩充f后的矩阵-----------")
for i in I_2:
    print(i)
    '''





