# -*- coding: UTF-8 -*-
'''
@File: 6_聊天生成.py (6_chat_generation.py)
@IDE: PyCharm
@Author: chaojie
@Date: 2025/11/7
@Introduce: Demo — load the ModelScope ChatPLUG-240M FiD dialogue pipeline
            and print the concrete pipeline/model/preprocessor classes.
'''
import os
# Route Hugging Face Hub downloads through the hf-mirror.com mirror and keep
# every framework's model cache on the D: drive.  These must be set before the
# model libraries are imported, which is why they live at the top of the file.
os.environ.update({
    'HF_ENDPOINT': 'https://hf-mirror.com',
    'XDG_CACHE_HOME': r'D:\weights',
    'CACHE_HOME': r'D:\weights',
    'MODELSCOPE_CACHE': r'D:\weights\modelscope',
})

def t1():
    """Build the ChatPLUG-240M FiD dialogue pipeline and print its parts.

    Loads the ``iic/ChatPLUG-240M`` model through the ModelScope pipeline
    factory on the GPU, then prints the concrete classes chosen for the
    pipeline, its model, and its preprocessor, followed by the model itself.
    """
    from modelscope.pipelines import pipeline
    from modelscope.utils.constant import Tasks
    from modelscope.models import Model

    # NOTE(review): the three imports below are never referenced directly;
    # presumably they are kept so the classes register themselves with the
    # modelscope registry on import — confirm before removing.
    from modelscope.pipelines.nlp.fid_dialogue_pipeline import FidDialoguePipeline
    from modelscope.models.nlp.fid_plug.text_generation import PlugV2FidChat
    from modelscope.preprocessors.nlp.text_generation_preprocessor import TextGenerationTransformersPreprocessor

    model_id = 'iic/ChatPLUG-240M'
    # device accepts: cpu, cuda, gpu, gpu:X or cuda:X.
    # NOTE(review): trust_remote_code=True runs code shipped with the model
    # repository — only safe for trusted model sources.
    pipeline_ins = pipeline(Tasks.fid_dialogue, model=model_id, model_revision='master', device='gpu',
                            trust_remote_code=True)

    # Show which concrete classes the factory resolved, in the same order as
    # the original demo: pipeline, model, preprocessor, then the model repr.
    for component in (pipeline_ins, pipeline_ins.model, pipeline_ins.preprocessor):
        print(type(component))
    print(pipeline_ins.model)





# Script entry point: run the ChatPLUG pipeline demo when executed directly.
if __name__ == '__main__':
    t1()