import os

# Point the openMind hub at the telecom endpoint before openmind is imported.
os.environ["OPENMIND_HUB_ENDPOINT"] = "https://telecom.openmind.cn"
import gradio as gr
import torch
from openmind import AutoModelForCausalLM, AutoTokenizer
from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
from threading import Thread
from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkmoderation.v2.region.moderation_region import ModerationRegion
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkmoderation.v2 import *


# HuaweiCloud credentials; if either is missing, the moderation step in
# predict() is skipped.
ak = os.getenv("CLOUD_SDK_AK")
sk = os.getenv("CLOUD_SDK_SK")


def text_moderate(unfiltered_text: str, region: str):
    """Call the HuaweiCloud Content Moderation API on the given text.

    :param unfiltered_text: The text to be moderated.
    :param region: The region that provides the content moderation API.
    """
    # Hard-coding the AK and SK, or storing them in plaintext, is a serious
    # security risk. Keep them encrypted in configuration files or environment
    # variables and decrypt them at use time. This example reads them from the
    # environment variables CLOUD_SDK_AK and CLOUD_SDK_SK; set both before
    # running.
    credentials = BasicCredentials(ak, sk)

    client = ModerationClient.new_builder() \
        .with_credentials(credentials) \
        .with_region(ModerationRegion.value_of(region)) \
        .build()

    try:
        request = RunTextModerationRequest()
        listItemsbody = [
            TextDetectionItemsReq(
                text=unfiltered_text
            )
        ]
        request.body = TextDetectionReq(
            items=listItemsbody
        )
        response = client.run_text_moderation(request)
        return response
    except exceptions.ClientRequestException as e:
        print(e.status_code)
        print(e.request_id)
        print(e.error_code)
        print(e.error_msg)
        raise e("Please make sure that you have subscribe to the content moderation service\
                and export the correct access key and secret key as environment variables.")
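
# Example usage (assumes an active subscription to the moderation service in
# the given region):
#     res = text_moderate("text to check", "cn-north-4")
#     print(res.result.suggestion)  # expected: "pass", "review", or "block"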

# Load the Qwen1.5 7B chat checkpoint from the openMind hub and move the model
# to the first Ascend NPU.
tokenizer = AutoTokenizer.from_pretrained("openmind/qwen1.5_7b_chat_pt")
model = AutoModelForCausalLM.from_pretrained("openmind/qwen1.5_7b_chat_pt", torch_dtype=torch.bfloat16)
model.to("npu:0")


class StopOnTokens(StoppingCriteria):
    """Stop generation once the most recent token is a designated stop token."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [2]  # assumed end-of-sequence id; align with tokenizer.eos_token_id if it differs
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False


def predict(message, history):
    stop = StopOnTokens()
    conversation = []

    # Rebuild the chat history in the role/content format expected by the
    # tokenizer's chat template.
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])

    conversation.append({"role": "user", "content": message})
    print(f'>>>conversation={conversation}', flush=True)
    prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
    model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
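    # Stream tokens from generate() as they are produced; skip_prompt keeps the
    # echoed prompt out of the output.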
    streamer = TextIteratorStreamer(tokenizer, timeout=100., skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        temperature=0.7,
        repetition_penalty=1.0,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop])
    )
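    # generate() blocks, so run it on a worker thread while this function
    # consumes tokens from the streamer.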
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    partial_message = ""
    for new_token in streamer:
        partial_message += new_token
        if '</s>' in partial_message:
            break
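    # When both credentials are configured, run the reply through HuaweiCloud
    # content moderation and withhold it unless the service returns "pass".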
    if all([ak, sk]):
        res = text_moderate(partial_message, "cn-north-4")
        if res.result.suggestion != "pass":
            partial_message = "抱歉,这个问题我无法回答!"
    return partial_message


# Set up the Gradio chat interface.
gr.ChatInterface(predict,
                 title="Qwen1.5 7B Chat",
                 description="Warning: all answers are AI-generated and may contain inaccurate information.",
                 examples=['What are the famous tourist attractions in Hangzhou?', 'What are the key techniques for sea fishing?']
                 ).launch()