import hashlib
import json
import os
import random
import string
import urllib.parse
import requests
import time


# Qingyunke free chatbot API.
def qingyunke(msg):
    """Send *msg* to the Qingyunke chatbot and return its reply text.

    Raises requests.RequestException on network failure/timeout and
    KeyError if the response payload has no "content" field.
    """
    url = "http://api.qingyunke.com/api.php"
    # Let requests build and percent-encode the query string instead of
    # interpolating a quoted value into the URL by hand.
    params = {"key": "free", "appid": "0", "msg": msg}
    response = requests.get(url, params=params, timeout=10)

    return response.json()["content"]

# print(qingyunke("你好"))


# 微软小冰
# SUB = _2A25yqllVDeRhGeBM41ET8ifNzD6IHXVR3s2drDV8PUNbmtAKLVDHkW9NRK_6Ww37Nt59QaZBHYavNcS4BPfmjzyk
# uid: 5175429989
# source: 209678993

def xiaobing(msg):
    """Chat with Microsoft XiaoIce via the Weibo direct-message API.

    Sends *msg* as a direct message to the bot account, then polls the
    conversation until the newest message differs from the one we sent
    (i.e. the bot replied) and returns that reply.

    Returns None if no reply arrives within the polling budget.
    """
    uid = '5175429989'
    source = '209678993'
    SUB = '_2A25ytWncDeRhGeBM41ET8ifNzD6IHXVRw9wUrDV8PUNbmtANLRn'
    url_send = 'https://api.weibo.com/webim/2/direct_messages/new.json'
    data = {
        'text': msg,
        'uid': uid,
        'source': source
    }
    headers = {
        'cookie': 'SUB=' + SUB,
        'Content-Type': 'application/x-www-form-urlencoded',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'Referer': 'https://api.weibo.com/chat/'
    }
    response = requests.post(url_send, data=data, headers=headers, timeout=10).json()
    sendMsg = response['text']

    url_get = 'https://api.weibo.com/webim/2/direct_messages/conversation.json?uid={}&source={}'.format(uid, source)
    # Poll for the bot's reply. Sleep between requests instead of
    # busy-waiting (the original looped with no delay, hammering the API),
    # and give up after a bounded number of attempts instead of hanging
    # forever when the bot never answers.
    for _ in range(30):
        reply = requests.get(url_get, headers=headers, timeout=10).json()
        getMsg = reply['direct_messages'][0]['text']
        if getMsg != sendMsg:
            return getMsg
        time.sleep(1)
    return None
# while 1:
    # msg = '后会无期'
    # print("原话>>", msg)
    # res = xiaobing(msg)
    # print("小冰>>", res)


# App_ID2159655914
# App_Keyx5RK1ZQfaqZRvFEr


def tencent(msg):
    """Ask the Tencent AI open-platform chatbot (nlp_textchat) for a reply.

    Builds the request signature per Tencent's signing protocol: sort the
    parameters by key, percent-encode each value (uppercase hex, which
    urllib.parse.quote produces), join as "k=v&" pairs, append
    "app_key=<APPKEY>", then take the uppercase MD5 hex digest.

    Raises requests.RequestException on network failure/timeout and
    KeyError if the API returns an error payload without 'data'/'answer'.
    """
    APPID = '2159655914'
    APPKEY = 'x5RK1ZQfaqZRvFEr'
    url = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_textchat'
    params = {
        'app_id': APPID,
        'time_stamp': str(int(time.time())),
        # Random 16-char alphanumeric nonce required by the API.
        'nonce_str': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(16)),
        'session': '10000'.encode('utf-8'),
        'question': msg.encode('utf-8')
    }
    # Key-sorted "k=v&" pairs; quote() percent-encodes the UTF-8 bytes
    # with uppercase hex as the signing spec requires.
    sign_before = ''
    for key in sorted(params):
        sign_before += '{}={}&'.format(key, urllib.parse.quote(params[key], safe=''))
    # Append the application key, then MD5 the whole string for the sign.
    sign_before += 'app_key={}'.format(APPKEY)
    params['sign'] = hashlib.md5(sign_before.encode('UTF-8')).hexdigest().upper()

    html = requests.post(url, data=params, timeout=10).json()
    return html['data']['answer']
# msg= '后悔无期'
# print("原话>>", msg)
# res = tencent(msg)
# print("腾讯>>", res)


# Tuling (Turing) chatbot API.
def tuling(msg):
    """Send *msg* to the Tuling V2 chatbot API and return the reply text.

    Returns None when the daily request quota is exhausted (intent
    code 4003). Raises requests.RequestException on network failure.
    """
    api_key = "aad288c840464f4cae124829db5f0ddb"
    url = 'http://openapi.tuling123.com/openapi/api/v2'
    data = {
        "perception": {
            "inputText": {
                "text": msg
            },
        },
        "userInfo": {
            "apiKey": api_key,
            "userId": "677498"
        }
    }
    # json= serializes the payload and sets the application/json
    # Content-Type header; the original posted a pre-dumped string as raw
    # form data with no header at all.
    html = requests.post(url, json=data, timeout=10).json()
    if html['intent']['code'] == 4003:
        print("次数用完")
        return None
    return html['results'][0]['values']['text']
# msg = '我好看吗'
# print("原话>>", msg)
# res = tuling(msg)
# print("图灵>>", res)






# 自定义机器人
# 脚本test_bot.py导入需要的库
import nltk
import ssl
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
from keras.models import model_from_json
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import pandas as pd
import pickle
import random

from mydjango.settings import ROBOT_ROOT



import nltk
nltk.download('punkt')


# Training corpus for the local classifier. Each intent entry carries:
#   tag       - the intent/class name,
#   patterns  - example user utterances used to train the classifier,
#   responses - candidate bot replies (one is picked at random),
#   context   - unused placeholder here.
intents = {"intents": [
        {"tag": "打招呼",
         "patterns": ["你好", "您好", "请问", "有人吗", "师傅","不好意思","美女","帅哥","靓妹","hi"],
         "responses": ["您好", "又是您啊", "吃了么您内","您有事吗"],
         "context": [""]
        },
        {"tag": "告别",
         "patterns": ["再见", "拜拜", "88", "回见", "回头见"],
         "responses": ["再见", "一路顺风", "下次见", "拜拜了您内"],
         "context": [""]
        },
   ]
}


# Build the vocabulary and training documents from the intents corpus.
words = []       # all stemmed tokens seen across every pattern (the vocabulary)
documents = []   # (token_list, tag) pairs, one per training pattern
classes = []     # distinct intent tags
ignore_words = []  # tokens to exclude from the vocabulary (currently none)

for intent in intents['intents']:
    for pattern in intent['patterns']:
        # Tokenize the pattern sentence into a list of word tokens.
        w = nltk.word_tokenize(pattern)
        # Add the tokens to the vocabulary.
        words.extend(w)
        # Record the document as a (tokens, tag) tuple,
        # e.g. [(["你好"], "打招呼"), (["88"], "告别")].
        documents.append((w, intent['tag']))
        # Track each distinct intent tag exactly once.
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

# Lowercase and stem every token, dropping any ignored words.
words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words]
# Deduplicate and sort for a stable, index-addressable vocabulary.
words = sorted(list(set(words)))
classes = sorted(list(set(classes)))

# print (len(classes), "语境", classes)
#
# print (len(words), "词数", words)
#
#
#
# # 创建我们的培训数据
# training = []
# # 为输出创建一个空数组
# output_empty = [0] * len(classes)
# # 训练集，每句话的单词袋
# for doc in documents:
#     # 初始化单词包
#     bag = []
#
#     pattern_words = doc[0]
#
#     pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
#
#     for w in words:
#         bag.append(1) if w in pattern_words else bag.append(0)
#
#     output_row = list(output_empty)
#     output_row[classes.index(doc[1])] = 1
#
#     training.append([bag, output_row])
#
# random.shuffle(training)
# training = np.array(training)
#
# train_x = list(training[:, 0])
# train_y = list(training[:, 1])
#
#
#
#
# model = Sequential()
# model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
# model.add(Dropout(0.5))
# model.add(Dense(64, activation='relu'))
# model.add(Dropout(0.5))
# model.add(Dense(len(train_y[0]), activation='softmax'))
#
#
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#
#
# model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)





def clean_up_sentence(sentence):
    """Tokenize *sentence* and reduce every token to its lowercase stem."""
    return [stemmer.stem(token.lower()) for token in nltk.word_tokenize(sentence)]

def bow(sentence, words, show_details=True):
    """Return a bag-of-words vector for *sentence* over the vocabulary *words*.

    The returned numpy array has one slot per vocabulary word, set to 1
    when that word appears among the sentence's stemmed tokens, else 0.
    When show_details is true, every match is reported on stdout.
    """
    stems = clean_up_sentence(sentence)
    vector = [0] * len(words)
    for stem in stems:
        for position, vocab_word in enumerate(words):
            if vocab_word == stem:
                vector[position] = 1
                if show_details:
                    print ("found in bag: %s" % vocab_word)
    return np.array(vector)


def classify_local(sentence):
    """Classify *sentence* against the trained Keras intent model.

    Loads the model architecture (v3ucn.json) and weights (v3ucn.h5f)
    from ROBOT_ROOT, feeds the sentence's bag-of-words vector through it,
    and returns a list of (class_name, probability_string) tuples sorted
    by descending probability, keeping only predictions above the
    confidence threshold. The list is empty when nothing is confident.
    """
    ERROR_THRESHOLD = 0.25

    # Use a context manager so the file is closed even if reading raises
    # (the original leaked the handle on any exception before close()).
    with open(os.path.join(ROBOT_ROOT, 'v3ucn.json'), 'r') as file:
        model = model_from_json(file.read())
    model.load_weights(os.path.join(ROBOT_ROOT, 'v3ucn.h5f'))

    # Per-class probabilities from the bag-of-words input vector.
    input_data = pd.DataFrame([bow(sentence, words)], dtype=float, index=['input'])
    results = model.predict([input_data])[0]
    # Keep only confident predictions, strongest first.
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    results.sort(key=lambda x: x[1], reverse=True)
    # Tuples of (intent name, probability as string).
    return [(classes[i], str(prob)) for i, prob in results]

# print(classify_local("hhhh"))


def customrobot(msg):
    """Answer *msg* using the locally trained intent classifier.

    Returns a randomly chosen canned response for the best-matching
    intent, or a fallback string when classification is empty or not
    confident enough.
    """
    wordlist = classify_local(msg)
    # classify_local returns an empty list when nothing clears its
    # threshold — guard before indexing (the original raised IndexError
    # on wordlist[0] in that case).
    if not wordlist or float(wordlist[0][1]) < 0.99:
        return "听不懂你在说啥"

    for intent in intents['intents']:
        if intent['tag'] == wordlist[0][0]:
            msg = random.choice(intent['responses'])

    return msg

# print(customrobot("你好"))

# json_file = model.to_json()
# with open('v3ucn.json', "w") as file:
#    file.write(json_file)
#
# model.save_weights('./v3ucn.h5f')