diff --git "a/ast_indexer" "b/ast_indexer" new file mode 100644--- /dev/null +++ "b/ast_indexer" @@ -0,0 +1 @@ +{"index": {"('MODELS', 'task-oriented-conversation', 'space-intent')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/dialog_intent_prediction.py", "imports": ["typing", "os"], "module": "modelscope.models.nlp.space.dialog_intent_prediction"}, "('MODELS', 'task-oriented-conversation', 'space-dst')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/dialog_state_tracking.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.space.dialog_state_tracking"}, "('MODELS', 'task-oriented-conversation', 'space-modeling')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/dialog_modeling.py", "imports": ["typing", "os"], "module": "modelscope.models.nlp.space.dialog_modeling"}, "('MODELS', 'table-question-answering', 'space-T-en')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space_T_en/text_to_sql.py", "imports": ["text2sql_lgesql", "typing", "torch", "os"], "module": "modelscope.models.nlp.space_T_en.text_to_sql"}, "('MODELS', 'document-grounded-dialog-retrieval', 'doc2bot')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/dgds/document_grounded_dialog_retrieval.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.nlp.dgds.document_grounded_dialog_retrieval"}, "('MODELS', 'document-grounded-dialog-rerank', 'doc2bot')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/dgds/document_grounded_dialog_rerank.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.nlp.dgds.document_grounded_dialog_rerank"}, "('MODELS', 'document-grounded-dialog-generate', 'doc2bot')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/dgds/document_grounded_dialog_generate.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.nlp.dgds.document_grounded_dialog_generate"}, "('MODELS', 'text-classification', 'peer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/peer/text_classification.py", "imports": ["copy", "torch"], "module": "modelscope.models.nlp.peer.text_classification"}, "('MODELS', 'nli', 'peer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/peer/text_classification.py", "imports": ["copy", "torch"], "module": "modelscope.models.nlp.peer.text_classification"}, "('MODELS', 'sentiment-classification', 'peer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/peer/text_classification.py", "imports": ["copy", "torch"], "module": "modelscope.models.nlp.peer.text_classification"}, "('MODELS', 'sentence-similarity', 'peer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/peer/text_classification.py", "imports": ["copy", "torch"], "module": "modelscope.models.nlp.peer.text_classification"}, "('MODELS', 'zero-shot-classification', 'peer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/peer/text_classification.py", "imports": ["copy", "torch"], "module": "modelscope.models.nlp.peer.text_classification"}, "('MODELS', 'fill-mask', 'ponet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/ponet/fill_mask.py", "imports": ["transformers", "torch"], "module": "modelscope.models.nlp.ponet.fill_mask"}, "('MODELS', 'backbone', 'ponet')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/ponet/backbone.py", "imports": ["distutils", "math", "packaging", "transformers", "torch"], "module": "modelscope.models.nlp.ponet.backbone"}, "('MODELS', 'document-segmentation', 'ponet-for-document-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/ponet/document_segmentation.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.ponet.document_segmentation"}, "('MODELS', 'extractive-summarization', 'ponet-for-document-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/ponet/document_segmentation.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.ponet.document_segmentation"}, "('MODELS', 'fid-dialogue', 'fid-T5')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/fid_T5/text_generation.py", "imports": ["io", "transformers", "torch", "os"], "module": "modelscope.models.nlp.fid_T5.text_generation"}, "('MODELS', 'competency-aware-translation', 'canmt')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/canmt/canmt_translation.py", "imports": ["numpy", "math", "os", "typing", "torch"], "module": "modelscope.models.nlp.canmt.canmt_translation"}, "('MODELS', 'chat', 'chatglm2-6b')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm2/text_generation.py", "imports": ["copy", "math", "sys", "typing", "transformers", "torch", "warnings"], "module": "modelscope.models.nlp.chatglm2.text_generation"}, "('MODELS', 'text-classification', 'plug-mental')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.plug_mental.text_classification"}, "('MODELS', 'nli', 'plug-mental')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.plug_mental.text_classification"}, "('MODELS', 'sentiment-classification', 'plug-mental')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.plug_mental.text_classification"}, "('MODELS', 'sentence-similarity', 'plug-mental')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.plug_mental.text_classification"}, "('MODELS', 'zero-shot-classification', 'plug-mental')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.plug_mental.text_classification"}, "('MODELS', 'backbone', 'plug-mental')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/backbone.py", "imports": ["math", "packaging", "dataclasses", "transformers", "typing", "torch"], "module": "modelscope.models.nlp.plug_mental.backbone"}, "('MODELS', 'backbone', 'transformers')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/hf_transformers/backbone.py", "imports": ["transformers"], "module": "modelscope.models.nlp.hf_transformers.backbone"}, "('MODELS', 'translation', 'csanmt-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/csanmt/translation.py", "imports": ["collections", 
"typing", "tensorflow", "math"], "module": "modelscope.models.nlp.csanmt.translation"}, "('MODELS', 'nli', 'veco')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/text_classification.py", "imports": ["transformers"], "module": "modelscope.models.nlp.veco.text_classification"}, "('MODELS', 'sentiment-classification', 'veco')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/text_classification.py", "imports": ["transformers"], "module": "modelscope.models.nlp.veco.text_classification"}, "('MODELS', 'sentence-similarity', 'veco')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/text_classification.py", "imports": ["transformers"], "module": "modelscope.models.nlp.veco.text_classification"}, "('MODELS', 'text-classification', 'veco')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/text_classification.py", "imports": ["transformers"], "module": "modelscope.models.nlp.veco.text_classification"}, "('MODELS', 'fill-mask', 'veco')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/fill_mask.py", "imports": ["transformers"], "module": "modelscope.models.nlp.veco.fill_mask"}, "('MODELS', 'backbone', 'veco')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/backbone.py", "imports": ["transformers"], "module": "modelscope.models.nlp.veco.backbone"}, "('MODELS', 'token-classification', 'veco')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/token_classification.py", "imports": ["transformers", "torch"], "module": "modelscope.models.nlp.veco.token_classification"}, "('MODELS', 'backbone', 'llama')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama/backbone.py", "imports": ["typing", "transformers", "torch", "math"], "module": "modelscope.models.nlp.llama.backbone"}, "('MODELS', 'text-generation', 'llama')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama/text_generation.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.llama.text_generation"}, "('MODELS', 'backbone', 'llama2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama2/backbone.py", "imports": ["typing", "transformers", "torch", "math"], "module": "modelscope.models.nlp.llama2.backbone"}, "('MODELS', 'text-generation', 'llama2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama2/text_generation.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.llama2.text_generation"}, "('MODELS', 'text-generation', 'polylm')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/polylm/text_generation.py", "imports": ["collections", "typing", "transformers", "torch"], "module": "modelscope.models.nlp.polylm.text_generation"}, "('MODELS', 'code-translation', 'codegeex')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/codegeex/codegeex_for_code_translation.py", "imports": ["typing", "copy", "torch"], "module": "modelscope.models.nlp.codegeex.codegeex_for_code_translation"}, "('MODELS', 'code-generation', 'codegeex')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/codegeex/codegeex_for_code_generation.py", "imports": ["typing", "copy", "torch"], "module": "modelscope.models.nlp.codegeex.codegeex_for_code_generation"}, "('MODELS', 'text-generation', 'glm130b')": 
{"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/glm_130b/text_generation.py", "imports": ["copy", "os", "time", "functools", "stat", "re", "random", "sys", "typing", "torch", "SwissArmyTransformer"], "module": "modelscope.models.nlp.glm_130b.text_generation"}, "('BACKBONES', 'backbone', 'bloom')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bloom/backbone.py", "imports": ["transformers"], "module": "modelscope.models.nlp.bloom.backbone"}, "('MODELS', 'backbone', 'T5')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/T5/backbone.py", "imports": ["copy", "math", "os", "typing", "transformers", "torch", "warnings"], "module": "modelscope.models.nlp.T5.backbone"}, "('MODELS', 'text2text-generation', 'T5')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/T5/text2text_generation.py", "imports": ["copy", "typing", "transformers", "torch", "warnings"], "module": "modelscope.models.nlp.T5.text2text_generation"}, "('MODELS', 'chat', 'chatglm6b')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm/text_generation.py", "imports": ["copy", "math", "os", "re", "sys", "typing", "transformers", "torch", "warnings"], "module": "modelscope.models.nlp.chatglm.text_generation"}, "('MODELS', 'text-classification', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.structbert.text_classification"}, "('MODELS', 'nli', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.structbert.text_classification"}, "('MODELS', 'sentiment-classification', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.structbert.text_classification"}, "('MODELS', 'sentence-similarity', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.structbert.text_classification"}, "('MODELS', 'zero-shot-classification', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/text_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.structbert.text_classification"}, "('MODELS', 'faq-question-answering', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/faq_question_answering.py", "imports": ["math", "os", "typing", "torch", "collections"], "module": "modelscope.models.nlp.structbert.faq_question_answering"}, "('MODELS', 'fill-mask', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/fill_mask.py", "imports": ["transformers", "torch"], "module": "modelscope.models.nlp.structbert.fill_mask"}, "('MODELS', 'backbone', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/backbone.py", "imports": ["math", "packaging", "dataclasses", "transformers", "typing", "torch"], "module": "modelscope.models.nlp.structbert.backbone"}, "('MODELS', 'token-classification', 'structbert')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/token_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.structbert.token_classification"}, "('MODELS', 'word-segmentation', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/token_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.structbert.token_classification"}, "('MODELS', 'part-of-speech', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/token_classification.py", "imports": ["torch"], "module": "modelscope.models.nlp.structbert.token_classification"}, "('MODELS', 'table-question-answering', 'space-T-cn')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space_T_cn/table_question_answering.py", "imports": ["numpy", "os", "transformers", "typing", "torch"], "module": "modelscope.models.nlp.space_T_cn.table_question_answering"}, "('HEADS', 'text-ranking', 'text-ranking')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/text_ranking_head.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.heads.text_ranking_head"}, "('HEADS', 'text-generation', 'text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/text_generation_head.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.heads.text_generation_head"}, "('HEADS', 'fill-mask', 'roberta-mlm')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/torch_pretrain_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.torch_pretrain_head"}, "('HEADS', 'fill-mask', 'bert-mlm')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/fill_mask_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.fill_mask_head"}, "('HEADS', 'fill-mask', 'fill-mask')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/fill_mask_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.fill_mask_head"}, "('HEADS', 'fill-mask', 'xlm-roberta-mlm')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/fill_mask_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.fill_mask_head"}, "('HEADS', 'token-classification', 'lstm-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.crf_head"}, "('HEADS', 'named-entity-recognition', 'lstm-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.crf_head"}, "('HEADS', 'word-segmentation', 'lstm-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.crf_head"}, "('HEADS', 'part-of-speech', 'lstm-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.crf_head"}, "('HEADS', 'transformer-crf', 'transformer-crf')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.crf_head"}, "('HEADS', 'token-classification', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.crf_head"}, "('HEADS', 'named-entity-recognition', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.crf_head"}, "('HEADS', 'word-segmentation', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.crf_head"}, "('HEADS', 'part-of-speech', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.heads.crf_head"}, "('HEADS', 'token-classification', 'token-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/token_classification_head.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.heads.token_classification_head"}, "('HEADS', 'named-entity-recognition', 'token-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/token_classification_head.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.heads.token_classification_head"}, "('HEADS', 'part-of-speech', 'token-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/token_classification_head.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.heads.token_classification_head"}, "('HEADS', 'information-extraction', 'information-extraction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/infromation_extraction_head.py", "imports": ["torch"], "module": "modelscope.models.nlp.heads.infromation_extraction_head"}, "('HEADS', 'relation-extraction', 'information-extraction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/infromation_extraction_head.py", "imports": ["torch"], "module": "modelscope.models.nlp.heads.infromation_extraction_head"}, "('HEADS', 'text-classification', 'text-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/text_classification_head.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.heads.text_classification_head"}, "('HEADS', 'sentence-similarity', 'text-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/text_classification_head.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.heads.text_classification_head"}, "('HEADS', 'nli', 'text-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/text_classification_head.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.heads.text_classification_head"}, "('HEADS', 'sentiment-classification', 'text-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/text_classification_head.py", "imports": ["typing", "torch"], "module": 
"modelscope.models.nlp.heads.text_classification_head"}, "('MODELS', 'fid-dialogue', 'fid-plug')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/fid_plug/text_generation.py", "imports": ["io", "transformers", "torch", "os"], "module": "modelscope.models.nlp.fid_plug.text_generation"}, "('MODELS', 'text-error-correction', 'bart')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bart/text_error_correction.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.nlp.bart.text_error_correction"}, "('MODELS', 'text-generation', 'gpt3')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt3/text_generation.py", "imports": ["collections", "typing", "transformers", "torch"], "module": "modelscope.models.nlp.gpt3.text_generation"}, "('MODELS', 'fill-mask', 'deberta_v2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/deberta_v2/fill_mask.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.models.nlp.deberta_v2.fill_mask"}, "('MODELS', 'backbone', 'deberta_v2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/deberta_v2/backbone.py", "imports": ["collections", "typing", "transformers", "torch"], "module": "modelscope.models.nlp.deberta_v2.backbone"}, "('MODELS', 'text-classification', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/text_classification.py", "imports": [], "module": "modelscope.models.nlp.bert.text_classification"}, "('MODELS', 'nli', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/text_classification.py", "imports": [], "module": "modelscope.models.nlp.bert.text_classification"}, "('MODELS', 'sentiment-classification', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/text_classification.py", "imports": [], "module": "modelscope.models.nlp.bert.text_classification"}, "('MODELS', 'sentence-similarity', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/text_classification.py", "imports": [], "module": "modelscope.models.nlp.bert.text_classification"}, "('MODELS', 'zero-shot-classification', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/text_classification.py", "imports": [], "module": "modelscope.models.nlp.bert.text_classification"}, "('MODELS', 'text-ranking', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/text_ranking.py", "imports": [], "module": "modelscope.models.nlp.bert.text_ranking"}, "('MODELS', 'fill-mask', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/fill_mask.py", "imports": [], "module": "modelscope.models.nlp.bert.fill_mask"}, "('MODELS', 'backbone', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/backbone.py", "imports": ["packaging", "transformers", "torch", "math"], "module": "modelscope.models.nlp.bert.backbone"}, "('MODELS', 'document-segmentation', 'bert-for-document-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/document_segmentation.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.bert.document_segmentation"}, "('MODELS', 'sentence-embedding', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/sentence_embedding.py", "imports": 
["torch"], "module": "modelscope.models.nlp.bert.sentence_embedding"}, "('MODELS', 'word-alignment', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/word_alignment.py", "imports": ["torch"], "module": "modelscope.models.nlp.bert.word_alignment"}, "('MODELS', 'siamese-uie', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/siamese_uie.py", "imports": ["copy", "torch"], "module": "modelscope.models.nlp.bert.siamese_uie"}, "('MODELS', 'token-classification', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/token_classification.py", "imports": [], "module": "modelscope.models.nlp.bert.token_classification"}, "('MODELS', 'part-of-speech', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/token_classification.py", "imports": [], "module": "modelscope.models.nlp.bert.token_classification"}, "('MODELS', 'word-segmentation', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/token_classification.py", "imports": [], "module": "modelscope.models.nlp.bert.token_classification"}, "('BACKBONES', 'backbone', 'gpt-neo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_neo/backbone.py", "imports": ["transformers"], "module": "modelscope.models.nlp.gpt_neo.backbone"}, "('BACKBONES', 'backbone', 'gpt2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt2/backbone.py", "imports": ["transformers"], "module": "modelscope.models.nlp.gpt2.backbone"}, "('MODELS', 'text-generation', 'palm-v2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/palm_v2/text_generation.py", "imports": ["numpy", "copy", "math", "os", "subprocess", "dataclasses", "typing", "transformers", "torch", "codecs", "json"], "module": "modelscope.models.nlp.palm_v2.text_generation"}, "('MODELS', 'fill-mask', 'megatron-bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/megatron_bert/fill_mask.py", "imports": ["transformers", "torch"], "module": "modelscope.models.nlp.megatron_bert.fill_mask"}, "('MODELS', 'backbone', 'megatron-bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/megatron_bert/backbone.py", "imports": ["transformers", "torch", "math"], "module": "modelscope.models.nlp.megatron_bert.backbone"}, "('MODELS', 'backbone', 'lstm')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/lstm/backbone.py", "imports": ["torch"], "module": "modelscope.models.nlp.lstm.backbone"}, "('MODELS', 'token-classification', 'lstm-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/lstm/token_classification.py", "imports": [], "module": "modelscope.models.nlp.lstm.token_classification"}, "('MODELS', 'named-entity-recognition', 'lstm-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/lstm/token_classification.py", "imports": [], "module": "modelscope.models.nlp.lstm.token_classification"}, "('MODELS', 'part-of-speech', 'lstm-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/lstm/token_classification.py", "imports": [], "module": "modelscope.models.nlp.lstm.token_classification"}, "('MODELS', 'word-segmentation', 'lstm-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/lstm/token_classification.py", "imports": [], "module": 
"modelscope.models.nlp.lstm.token_classification"}, "('MODELS', 'word-segmentation', 'lstm-crf-for-word-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/lstm/token_classification.py", "imports": [], "module": "modelscope.models.nlp.lstm.token_classification"}, "('MODELS', 'text-classification', 'user-satisfaction-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/use/user_satisfaction_estimation.py", "imports": ["numpy", "os", "transformers", "typing", "torch"], "module": "modelscope.models.nlp.use.user_satisfaction_estimation"}, "('MODELS', 'text-generation', 'gpt-moe')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/text_generation.py", "imports": ["typing", "transformers"], "module": "modelscope.models.nlp.gpt_moe.text_generation"}, "('MODELS', 'text-summarization', 'mglm')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/mglm_for_text_summarization.py", "imports": ["numpy", "os", "random", "typing", "torch", "megatron_util"], "module": "modelscope.models.nlp.mglm.mglm_for_text_summarization"}, "('MODELS', 'information-extraction', 'information-extraction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/information_extraction.py", "imports": ["numpy", "typing"], "module": "modelscope.models.nlp.task_models.information_extraction"}, "('MODELS', 'relation-extraction', 'information-extraction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/information_extraction.py", "imports": ["numpy", "typing"], "module": "modelscope.models.nlp.task_models.information_extraction"}, "('MODELS', 'text-classification', 'text-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/text_classification.py", "imports": ["numpy", "typing"], "module": "modelscope.models.nlp.task_models.text_classification"}, "('MODELS', 'text-ranking', 'text-ranking')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/text_ranking.py", "imports": ["numpy", "typing"], "module": "modelscope.models.nlp.task_models.text_ranking"}, "('MODELS', 'fill-mask', 'fill-mask')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/fill_mask.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.models.nlp.task_models.fill_mask"}, "('MODELS', 'machine-reading-comprehension', 'machine-reading-comprehension')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/machine_reading_comprehension.py", "imports": ["os", "dataclasses", "typing", "transformers", "torch"], "module": "modelscope.models.nlp.task_models.machine_reading_comprehension"}, "('MODELS', 'text-generation', 'text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/text_generation.py", "imports": ["numpy", "transformers", "torch", "typing"], "module": "modelscope.models.nlp.task_models.text_generation"}, "('MODELS', 'feature-extraction', 'feature-extraction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/feature_extraction.py", "imports": ["numpy", "typing"], "module": "modelscope.models.nlp.task_models.feature_extraction"}, "('MODELS', 'token-classification', 'token-classification')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'part-of-speech', 'token-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'named-entity-recognition', 'token-classification-for-ner')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'transformer-crf', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'token-classification', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'token-classification', 'transformer-crf-for-word-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'named-entity-recognition', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'part-of-speech', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'word-segmentation', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'word-segmentation', 'transformer-crf-for-word-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py", "imports": ["typing", "torch"], "module": "modelscope.models.nlp.task_models.token_classification"}, "('MODELS', 'backbone', 'xlm-roberta')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/xlm_roberta/backbone.py", "imports": ["packaging", "transformers", "torch", "math"], "module": "modelscope.models.nlp.xlm_roberta.backbone"}, "('MODELS', 'backbone', 'qwen-7b')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/qwen/backbone.py", "imports": ["importlib", "einops", "math", "typing", "transformers", "flash_attn", "torch"], "module": "modelscope.models.nlp.qwen.backbone"}, "('MODELS', 'text-generation', 'qwen-7b')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/qwen/text_generation.py", "imports": ["typing", "transformers", "torch", "warnings"], "module": "modelscope.models.nlp.qwen.text_generation"}, "('MODELS', 'chat', 'qwen-7b')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/qwen/text_generation.py", "imports": ["typing", "transformers", "torch", "warnings"], "module": "modelscope.models.nlp.qwen.text_generation"}, "('MODELS', 'translation-evaluation', 'unite')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/unite/translation_evaluation.py", "imports": ["numpy", "math", "packaging", "dataclasses", "typing", "transformers", "torch", "warnings"], "module": "modelscope.models.nlp.unite.translation_evaluation"}, "('MODELS', 'text-to-image-synthesis', 'diffusion-text-to-image-synthesis')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/diffusion/model.py", "imports": ["numpy", "os", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.diffusion.model"}, "('MODELS', 'multimodal-dialogue', 'mplug-owl')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug_owl/modeling_mplug_owl.py", "imports": ["logging", "copy", "math", "os", "random", "dataclasses", "transformers", "typing", "torch", "io"], "module": "modelscope.models.multi_modal.mplug_owl.modeling_mplug_owl"}, "('MODELS', 'text-to-video-synthesis', 'latent-text-to-video-synthesis')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_synthesis/text_to_video_synthesis_model.py", "imports": ["einops", "os", "open_clip", "typing", "torch"], "module": "modelscope.models.multi_modal.video_synthesis.text_to_video_synthesis_model"}, "('MODELS', 'text-to-video-synthesis', 'videocomposer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/videocomposer_model.py", "imports": ["copy", "einops", "os", "open_clip", "typing", "torch", "pynvml"], "module": "modelscope.models.multi_modal.videocomposer.videocomposer_model"}, "('MODELS', 'video-temporal-grounding', 'soonet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/soonet/model.py", "imports": ["torch", "os"], "module": "modelscope.models.multi_modal.soonet.model"}, "('MODELS', 'text-to-image-synthesis', 'stable-diffusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/stable_diffusion/stable_diffusion.py", "imports": ["os", "functools", "packaging", "typing", "transformers", "torch", "diffusers"], "module": "modelscope.models.multi_modal.stable_diffusion.stable_diffusion"}, "('MODELS', 'image-captioning', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'ocr-recognition', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'visual-grounding', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'visual-question-answering', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", 
"functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'visual-entailment', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'image-classification', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'text-summarization', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'text-classification', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'auto-speech-recognition', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'sudoku', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'text2sql', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py", "imports": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.ofa_for_all_tasks"}, "('MODELS', 'generative-multi-modal-embedding', 'gemm-generative-multi-modal')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/gemm/gemm_model.py", "imports": ["PIL", "numpy", "os", "typing", "torch", "json", "torchvision"], "module": "modelscope.models.multi_modal.gemm.gemm_model"}, "('MODELS', 'image-to-video', 'image-to-video-model')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/image_to_video_model.py", "imports": ["copy", "os", "random", "typing", "torch"], "module": "modelscope.models.multi_modal.image_to_video.image_to_video_model"}, "('MODELS', 'video-multi-modal-embedding', 'video-clip-multi-modal-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py", "imports": ["PIL", "numpy", "tempfile", "os", "random", "typing", "uuid", "torch", "urllib", "json", "decord"], "module": "modelscope.models.multi_modal.mmr.models.clip_for_mm_video_embedding"}, "('MODELS', 'visual-question-answering', 'mplug')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug_for_all_tasks.py", "imports": ["typing", "os"], "module": "modelscope.models.multi_modal.mplug_for_all_tasks"}, "('MODELS', 'image-captioning', 'mplug')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug_for_all_tasks.py", "imports": ["typing", "os"], "module": "modelscope.models.multi_modal.mplug_for_all_tasks"}, "('MODELS', 'image-text-retrieval', 'mplug')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug_for_all_tasks.py", "imports": ["typing", "os"], "module": "modelscope.models.multi_modal.mplug_for_all_tasks"}, "('MODELS', 'video-question-answering', 'hitea')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug_for_all_tasks.py", "imports": ["typing", "os"], "module": "modelscope.models.multi_modal.mplug_for_all_tasks"}, "('MODELS', 'video-captioning', 'hitea')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug_for_all_tasks.py", "imports": ["typing", "os"], "module": "modelscope.models.multi_modal.mplug_for_all_tasks"}, "('MODELS', 'text-to-image-synthesis', 'multi-stage-diffusion-text-to-image-synthesis')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/multi_stage_diffusion/model.py", "imports": ["PIL", "numpy", "math", "os", "typing", "torch", "json"], "module": "modelscope.models.multi_modal.multi_stage_diffusion.model"}, "('MODELS', 'text-to-image-synthesis', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_text_to_image_synthesis_model.py", "imports": ["PIL", "numpy", "taming", "os", "typing", "torch", "pkg_resources", "json", "torchvision"], "module": "modelscope.models.multi_modal.ofa_for_text_to_image_synthesis_model"}, "('MODELS', 'document-vl-embedding', 'vldoc')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/vldoc/model.py", "imports": ["logging", "copy", "math", "os", "re", "sys", "torch", "json", "torchvision"], "module": "modelscope.models.multi_modal.vldoc.model"}, "('MODELS', 'text-classification', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/text_classification.py", "imports": ["torch"], "module": "modelscope.models.multi_modal.mgeo.text_classification"}, "('MODELS', 'nli', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/text_classification.py", "imports": ["torch"], "module": "modelscope.models.multi_modal.mgeo.text_classification"}, "('MODELS', 'sentiment-classification', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/text_classification.py", "imports": ["torch"], "module": "modelscope.models.multi_modal.mgeo.text_classification"}, "('MODELS', 'sentence-similarity', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/text_classification.py", "imports": ["torch"], "module": "modelscope.models.multi_modal.mgeo.text_classification"}, "('MODELS', 'zero-shot-classification', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/text_classification.py", "imports": ["torch"], "module": "modelscope.models.multi_modal.mgeo.text_classification"}, "('MODELS', 'text-ranking', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/text_ranking.py", "imports": ["torch"], "module": "modelscope.models.multi_modal.mgeo.text_ranking"}, "('MODELS', 'backbone', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/backbone.py", 
"imports": ["math", "os", "random", "dataclasses", "transformers", "typing", "torch", "warnings"], "module": "modelscope.models.multi_modal.mgeo.backbone"}, "('MODELS', 'token-classification', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/token_classification.py", "imports": ["torch"], "module": "modelscope.models.multi_modal.mgeo.token_classification"}, "('MODELS', 'part-of-speech', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/token_classification.py", "imports": ["torch"], "module": "modelscope.models.multi_modal.mgeo.token_classification"}, "('MODELS', 'word-segmentation', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/token_classification.py", "imports": ["torch"], "module": "modelscope.models.multi_modal.mgeo.token_classification"}, "('MODELS', 'image-captioning', 'clip-interrogator')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/clip_interrogator/model.py", "imports": ["math", "os", "requests", "safetensors", "PIL", "numpy", "tqdm", "time", "open_clip", "hashlib", "dataclasses", "typing", "transformers", "torch", "torchvision"], "module": "modelscope.models.multi_modal.clip_interrogator.model"}, "('MODELS', 'video-to-video', 'video-to-video-model')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/video_to_video_model.py", "imports": ["copy", "os", "random", "typing", "torch"], "module": "modelscope.models.multi_modal.video_to_video.video_to_video_model"}, "('MODELS', 'generative-multi-modal-embedding', 'rleg-generative-multi-modal')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/rleg/rleg.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.models.multi_modal.rleg.rleg"}, "('MODELS', 'multi-modal-similarity', 'team-multi-modal-similarity')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/team/team_model.py", "imports": ["PIL", "numpy", "cv2", "tokenizers", "typing", "torch", "torchvision"], "module": "modelscope.models.multi_modal.team.team_model"}, "('MODELS', 'multi-modal-embedding', 'clip-multi-modal-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/clip/model.py", "imports": ["numpy", "os", "typing", "torch", "collections", "json"], "module": "modelscope.models.multi_modal.clip.model"}, "('MODELS', 'efficient-diffusion-tuning', 'efficient-diffusion-tuning')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/efficient_diffusion_tuning/efficient_stable_diffusion.py", "imports": ["os", "functools", "transformers", "typing", "torch", "diffusers"], "module": "modelscope.models.multi_modal.efficient_diffusion_tuning.efficient_stable_diffusion"}, "('MODELS', 'keyword-spotting', 'speech_dfsmn_kws_char_farfield')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/farfield/model.py", "imports": ["typing", "tempfile", "os"], "module": "modelscope.models.audio.kws.farfield.model"}, "('MODELS', 'keyword-spotting', 'speech_dfsmn_kws_char_farfield_iot')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/farfield/model.py", "imports": ["typing", "tempfile", "os"], "module": "modelscope.models.audio.kws.farfield.model"}, "('MODELS', 'keyword-spotting', 'kws-kwsbp')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/generic_key_word_spotting.py", "imports": ["typing", "os"], "module": "modelscope.models.audio.kws.generic_key_word_spotting"}, "('MODELS', 'keyword-spotting', 'speech_kws_fsmn_char_ctc_nearfield')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/nearfield/model.py", "imports": ["tempfile", "os", "sys", "typing", "torch"], "module": "modelscope.models.audio.kws.nearfield.model"}, "('MODELS', 'acoustic-noise-suppression', 'speech_dfsmn_ans')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/denoise_net.py", "imports": ["torch"], "module": "modelscope.models.audio.ans.denoise_net"}, "('MODELS', 'acoustic-noise-suppression', 'speech_frcrn_ans_cirm_16k')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/frcrn.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.audio.ans.frcrn"}, "('MODELS', 'auto-speech-recognition', 'generic-asr')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/asr/generic_automatic_speech_recognition.py", "imports": ["typing", "os"], "module": "modelscope.models.audio.asr.generic_automatic_speech_recognition"}, "('MODELS', 'voice-activity-detection', 'generic-asr')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/asr/generic_automatic_speech_recognition.py", "imports": ["typing", "os"], "module": "modelscope.models.audio.asr.generic_automatic_speech_recognition"}, "('MODELS', 'language-score-prediction', 'generic-asr')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/asr/generic_automatic_speech_recognition.py", "imports": ["typing", "os"], "module": "modelscope.models.audio.asr.generic_automatic_speech_recognition"}, "('MODELS', 'speech-timestamp', 'generic-asr')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/asr/generic_automatic_speech_recognition.py", "imports": ["typing", "os"], "module": "modelscope.models.audio.asr.generic_automatic_speech_recognition"}, "('MODELS', 'auto-speech-recognition', 'wenet-asr')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/asr/wenet_automatic_speech_recognition.py", "imports": ["wenetruntime", "json", "typing", "os"], "module": "modelscope.models.audio.asr.wenet_automatic_speech_recognition"}, "('MODELS', 'text-to-speech', 'sambert-hifigan')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/tts/sambert_hifi.py", "imports": ["numpy", "__future__", "os", "shutil", "matplotlib", "yaml", "json", "wave", "zipfile", "datetime"], "module": "modelscope.models.audio.tts.sambert_hifi"}, "('MODELS', 'inverse-text-processing', 'generic-itn')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/itn/generic_inverse_text_processing.py", "imports": ["typing", "os"], "module": "modelscope.models.audio.itn.generic_inverse_text_processing"}, "('MODELS', 'speaker-verification', 'eres2net-aug-sv')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/ERes2Net_aug.py", "imports": ["math", "os", "torchaudio", "typing", "torch"], "module": "modelscope.models.audio.sv.ERes2Net_aug"}, "('HEADS', 'speaker-diarization-dialogue-detection', 'text-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_diarization_dialogue_detection.py", "imports": ["torch"], "module": 
"modelscope.models.audio.sv.speaker_diarization_dialogue_detection"}, "('MODELS', 'speaker-diarization-dialogue-detection', 'text-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_diarization_dialogue_detection.py", "imports": ["torch"], "module": "modelscope.models.audio.sv.speaker_diarization_dialogue_detection"}, "('MODELS', 'speaker-diarization-dialogue-detection', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_diarization_dialogue_detection.py", "imports": ["torch"], "module": "modelscope.models.audio.sv.speaker_diarization_dialogue_detection"}, "('MODELS', 'speech-language-recognition', 'cam++-lre')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/lanuage_recognition_model.py", "imports": ["numpy", "os", "torchaudio", "typing", "torch"], "module": "modelscope.models.audio.sv.lanuage_recognition_model"}, "('MODELS', 'speaker-verification', 'eres2net-sv')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/ERes2Net.py", "imports": ["math", "os", "torchaudio", "typing", "torch"], "module": "modelscope.models.audio.sv.ERes2Net"}, "('MODELS', 'speaker-verification', 'ecapa-tdnn-sv')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/ecapa_tdnn.py", "imports": ["numpy", "math", "os", "torchaudio", "typing", "torch"], "module": "modelscope.models.audio.sv.ecapa_tdnn"}, "('MODELS', 'speaker-verification', 'rdino_ecapa-tdnn-sv')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/rdino.py", "imports": ["math", "os", "torchaudio", "typing", "torch"], "module": "modelscope.models.audio.sv.rdino"}, "('HEADS', 'speaker-diarization-semantic-speaker-turn-detection', 'token-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_diarization_semantic_speaker_turn_detection.py", "imports": ["torch"], "module": "modelscope.models.audio.sv.speaker_diarization_semantic_speaker_turn_detection"}, "('MODELS', 'speaker-diarization-semantic-speaker-turn-detection', 'token-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_diarization_semantic_speaker_turn_detection.py", "imports": ["torch"], "module": "modelscope.models.audio.sv.speaker_diarization_semantic_speaker_turn_detection"}, "('MODELS', 'speaker-diarization-semantic-speaker-turn-detection', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_diarization_semantic_speaker_turn_detection.py", "imports": ["torch"], "module": "modelscope.models.audio.sv.speaker_diarization_semantic_speaker_turn_detection"}, "('MODELS', 'speaker-diarization', 'scl-sd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_change_locator.py", "imports": ["numpy", "os", "torchaudio", "typing", "torch", "collections"], "module": "modelscope.models.audio.sv.speaker_change_locator"}, "('MODELS', 'speaker-verification', 'cam++-sv')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/DTDNN.py", "imports": ["numpy", "os", "torchaudio", "typing", "torch", "collections"], "module": "modelscope.models.audio.sv.DTDNN"}, "('MODELS', 'speaker-diarization', 'cluster-backend')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/cluster_backend.py", "imports": ["numpy", "umap", "sklearn", "typing", "scipy", 
"hdbscan"], "module": "modelscope.models.audio.sv.cluster_backend"}, "('MODELS', 'speaker-verification', 'generic-sv')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/generic_speaker_verification.py", "imports": ["typing", "os"], "module": "modelscope.models.audio.sv.generic_speaker_verification"}, "('MODELS', 'speaker-diarization', 'generic-sv')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/generic_speaker_verification.py", "imports": ["typing", "os"], "module": "modelscope.models.audio.sv.generic_speaker_verification"}, "('MODELS', 'punctuation', 'generic-punc')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/punc/generic_punctuation.py", "imports": ["typing", "os"], "module": "modelscope.models.audio.punc.generic_punctuation"}, "('MODELS', 'speech-separation', 'speech_mossformer_separation_temporal_8k')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/separation/mossformer.py", "imports": ["typing", "copy", "torch", "os"], "module": "modelscope.models.audio.separation.mossformer"}, "('MODELS', 'image-depth-estimation', 'newcrfs-depth-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation/newcrfs_model.py", "imports": ["numpy", "torch", "os"], "module": "modelscope.models.cv.image_depth_estimation.newcrfs_model"}, "('HEADS', 'default', 'MaskFormerSemanticHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/pan_merge/maskformer_semantic_head.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.image_semantic_segmentation.pan_merge.maskformer_semantic_head"}, "('MODELS', 'image-segmentation', 'swinL-semantic-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/semantic_seg_model.py", "imports": ["numpy", "torch", "os"], "module": "modelscope.models.cv.image_semantic_segmentation.semantic_seg_model"}, "('MODELS', 'image-segmentation', 'vitadapter-semantic-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/semantic_seg_model.py", "imports": ["numpy", "torch", "os"], "module": "modelscope.models.cv.image_semantic_segmentation.semantic_seg_model"}, "('PIPELINES', 'default', 'ResizeToMultiple')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/utils/data_process_func.py", "imports": ["mmcv", "mmdet"], "module": "modelscope.models.cv.image_semantic_segmentation.vit_adapter.utils.data_process_func"}, "('DETECTORS', 'default', 'EncoderDecoderMask2Former')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/segmentors/encoder_decoder_mask2former.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.segmentors.encoder_decoder_mask2former"}, "('HEADS', 'default', 'Mask2FormerHeadFromMMSeg')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/decode_heads/mask2former_head_from_mmseg.py", "imports": ["mmcv", "copy", "mmdet", "torch"], "module": "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.decode_heads.mask2former_head_from_mmseg"}, "('BACKBONES', 'default', 'BASEBEiT')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/backbone/base/beit.py", "imports": ["timm", "mmdet", "torch", "math", "mmcv", "functools"], "module": "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.backbone.base.beit"}, "('BACKBONES', 'default', 'BEiTAdapter')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/backbone/beit_adapter.py", "imports": ["logging", "timm", "math", "mmdet", "torch"], "module": "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.backbone.beit_adapter"}, "('MODELS', 'semantic-segmentation', 'ddpm')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/ddpm_segmentation_model.py", "imports": ["ddpm_guided_diffusion", "typing", "torch", "os"], "module": "modelscope.models.cv.image_semantic_segmentation.ddpm_segmentation_model"}, "('MODELS', 'image-quality-assessment-degradation', 'image-quality-assessment-degradation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_degradation/image_quality_assessment_degradation.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_quality_assessment_degradation.image_quality_assessment_degradation"}, "('MODELS', 'image-segmentation', 'swinL-panoptic-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_panoptic_segmentation/panseg_model.py", "imports": ["torch", "os"], "module": "modelscope.models.cv.image_panoptic_segmentation.panseg_model"}, "('MODELS', 'image-quality-assessment-mos', 'image-quality-assessment-man')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_man/image_quality_assessment_man.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_quality_assessment_man.image_quality_assessment_man"}, "('MODELS', 'video-summarization', 'pgl-video-summarization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_summarization/summarizer.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.models.cv.video_summarization.summarizer"}, "('MODELS', 'body-2d-keypoints', 'body-2d-keypoints')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_2d_keypoints/hrnet_v2.py", "imports": ["numpy", "torch", "os"], "module": "modelscope.models.cv.body_2d_keypoints.hrnet_v2"}, "('MODELS', 'image-fewshot-detection', 'defrcn')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/defrcn_for_fewshot.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_defrcn_fewshot.defrcn_for_fewshot"}, "('MODELS', 'object-detection-3d', 'depe')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/depe_detect.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.models.cv.object_detection_3d.depe.depe_detect"}, "('BBOX_CODERS', 'default', 'NMSFreeCoder')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/core/bbox/coders/nms_free_coder.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.core.bbox.coders.nms_free_coder"}, "('MATCH_COST', 'default', 'BBox3DL1Cost')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/core/bbox/match_costs/match_cost.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.core.bbox.match_costs.match_cost"}, "('BBOX_ASSIGNERS', 'default', 'HungarianAssigner3D')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/core/bbox/assigners/hungarian_assigner_3d.py", "imports": ["scipy", "mmdet", "torch"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.core.bbox.assigners.hungarian_assigner_3d"}, "('DETECTORS', 'default', 'Petr3D')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/detectors/petr3d.py", "imports": ["mmdet3d", "numpy", "mmdet", "torch", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.detectors.petr3d"}, "('HEADS', 'default', 'PETRv2DEDNHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/dense_heads/petrv2_dednhead.py", "imports": ["mmdet3d", "numpy", "copy", "math", "mmdet", "torch", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.dense_heads.petrv2_dednhead"}, "('POSITIONAL_ENCODING', 'default', 'SinePositionalEncoding3D')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/utils/positional_encoding.py", "imports": ["mmcv", "torch", "math"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.utils.positional_encoding"}, "('TRANSFORMER', 'default', 'PETRDNTransformer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/utils/petr_transformer.py", "imports": ["typing", "copy", "mmdet", "torch", "math", "warnings", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.utils.petr_transformer"}, "('TRANSFORMER_LAYER', 'default', 'PETRTransformerDecoderLayer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/utils/petr_transformer.py", "imports": ["typing", "copy", "mmdet", "torch", "math", "warnings", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.utils.petr_transformer"}, "('ATTENTION', 'default', 'PETRMultiheadAttention')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/utils/petr_transformer.py", "imports": ["typing", "copy", "mmdet", "torch", "math", "warnings", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.utils.petr_transformer"}, "('TRANSFORMER_LAYER_SEQUENCE', 'default', 'PETRTransformerEncoder')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/utils/petr_transformer.py", "imports": ["typing", "copy", "mmdet", "torch", "math", "warnings", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.utils.petr_transformer"}, "('TRANSFORMER_LAYER_SEQUENCE', 'default', 'PETRTransformerDecoder')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/utils/petr_transformer.py", "imports": ["typing", 
"copy", "mmdet", "torch", "math", "warnings", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.utils.petr_transformer"}, "('BACKBONES', 'default', 'VoVNet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/backbones/vovnet.py", "imports": ["mmcv", "collections", "mmdet", "torch"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.backbones.vovnet"}, "('NECKS', 'default', 'CPFPN')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/necks/cp_fpn.py", "imports": ["mmcv", "mmdet", "torch"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.necks.cp_fpn"}, "('PIPELINES', 'default', 'LoadMultiViewImageFromMultiSweepsFiles')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/datasets/pipelines/loading.py", "imports": ["mmcv", "numpy", "mmdet"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.datasets.pipelines.loading"}, "('PIPELINES', 'default', 'PadMultiViewImage')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/datasets/pipelines/transform_3d.py", "imports": ["mmdet3d", "numpy", "copy", "PIL", "mmdet", "torch", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.datasets.pipelines.transform_3d"}, "('PIPELINES', 'default', 'NormalizeMultiviewImage')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/datasets/pipelines/transform_3d.py", "imports": ["mmdet3d", "numpy", "copy", "PIL", "mmdet", "torch", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.datasets.pipelines.transform_3d"}, "('PIPELINES', 'default', 'ResizeCropFlipImage')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/datasets/pipelines/transform_3d.py", "imports": ["mmdet3d", "numpy", "copy", "PIL", "mmdet", "torch", "mmcv"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.datasets.pipelines.transform_3d"}, "('DATASETS', 'default', 'CustomNuScenesDataset')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/datasets/nuscenes_dataset.py", "imports": ["mmdet3d", "numpy", "mmdet"], "module": "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.datasets.nuscenes_dataset"}, "('MODELS', 'image-face-fusion', 'image-face-fusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/image_face_fusion.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "collections", "torchvision"], "module": "modelscope.models.cv.image_face_fusion.image_face_fusion"}, "('MODELS', 'image-deblurring', 'nafnet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_deblur/nafnet_for_image_deblur.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_deblur.nafnet_for_image_deblur"}, "('MODELS', 'image-matching', 'quadtree-attention-image-matching')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/quadtree_attention_model.py", "imports": ["numpy", "cv2", "os", "pathlib", "torch"], "module": 
"modelscope.models.cv.image_matching.quadtree_attention_model"}, "('MODELS', 'video-super-resolution', 'real-basicvsr')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_super_resolution/real_basicvsr_for_video_super_resolution.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.video_super_resolution.real_basicvsr_for_video_super_resolution"}, "('MODELS', 'video-super-resolution', 'msrresnet-lite')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_super_resolution/msrresnet_lite_model.py", "imports": ["functools", "torch", "typing", "os"], "module": "modelscope.models.cv.video_super_resolution.msrresnet_lite_model"}, "('MODELS', 'image-classification', 'ClassificationModel')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_classification/mmcls_model.py", "imports": ["os"], "module": "modelscope.models.cv.image_classification.mmcls_model"}, "('BACKBONES', 'default', 'BEiTv2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_classification/backbones/beit_v2.py", "imports": ["einops", "math", "itertools", "os", "functools", "mmcls", "typing", "torch", "warnings", "mmcv", "collections"], "module": "modelscope.models.cv.image_classification.backbones.beit_v2"}, "('BACKBONES', 'default', 'NextViT')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_classification/backbones/nextvit.py", "imports": ["einops", "math", "itertools", "os", "functools", "mmcls", "typing", "torch", "warnings", "mmcv", "collections"], "module": "modelscope.models.cv.image_classification.backbones.nextvit"}, "('MODELS', 'image-classification', 'content-check')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_classification/resnet50_cc.py", "imports": ["math", "os", "torch", "collections", "torchvision"], "module": "modelscope.models.cv.image_classification.resnet50_cc"}, "('MODELS', 'face-reconstruction', 'face_reconstruction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/facerecon_model.py", "imports": ["numpy", "cv2", "os", "torch", "collections"], "module": "modelscope.models.cv.face_reconstruction.models.facerecon_model"}, "('MODELS', 'text-driven-segmentation', 'text-driven-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_driven_segmentation/lseg_model.py", "imports": ["PIL", "numpy", "os", "typing", "torch", "json"], "module": "modelscope.models.cv.text_driven_segmentation.lseg_model"}, "('MODELS', 'hand-static', 'hand-static')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/hand_static/hand_model.py", "imports": ["PIL", "numpy", "cv2", "os", "sys", "torch", "torchvision"], "module": "modelscope.models.cv.hand_static.hand_model"}, "('MODELS', 'movie-scene-segmentation', 'resnet50-bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/movie_scene_segmentation/model.py", "imports": ["PIL", "numpy", "tqdm", "einops", "math", "os", "typing", "torch", "shotdetect_scenedetect_lgss", "torchvision"], "module": "modelscope.models.cv.movie_scene_segmentation.model"}, "('MODELS', 'image-skychange', 'image-skychange')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_skychange/skychange_model.py", "imports": ["pdb", "math", "os", "cv2", "time", "typing", "torch", "collections", "json"], "module": 
"modelscope.models.cv.image_skychange.skychange_model"}, "('PREPROCESSORS', 'cv', 'image-sky-change-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_skychange/preprocessor.py", "imports": ["numpy", "pdb", "cv2", "typing", "torch", "numbers", "json", "torchvision"], "module": "modelscope.models.cv.image_skychange.preprocessor"}, "('MODELS', 'image-inpainting', 'FFTInpainting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/model.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_inpainting.model"}, "('MODELS', 'nerf-recon-acc', 'nerf-recon-acc')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_acc/nerf_recon_acc.py", "imports": ["tqdm", "numpy", "cv2", "os", "time", "glob", "torch"], "module": "modelscope.models.cv.nerf_recon_acc.nerf_recon_acc"}, "('PREPROCESSORS', 'cv', 'nerf-recon-acc-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/nerf_preprocess.py", "imports": ["numpy", "cv2", "os", "subprocess", "tensorflow", "typing", "glob"], "module": "modelscope.models.cv.nerf_recon_4k.nerf_preprocess"}, "('MODELS', 'nerf-recon-4k', 'nerf-recon-4k')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/nerf_recon_4k.py", "imports": ["tqdm", "numpy", "os", "argparse", "time", "imageio", "random", "torch", "mmcv"], "module": "modelscope.models.cv.nerf_recon_4k.nerf_recon_4k"}, "('MODELS', 'open-vocabulary-detection', 'open-vocabulary-detection-vild')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/open_vocabulary_detection_vild/vild.py", "imports": ["numpy", "os", "tensorflow", "clip", "typing", "torch", "scipy"], "module": "modelscope.models.cv.open_vocabulary_detection_vild.vild"}, "('MODELS', 'pointcloud-sceneflow-estimation', 'rcp-sceneflow-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/pointcloud_sceneflow_estimation/rcp_model.py", "imports": ["numpy", "torch", "os"], "module": "modelscope.models.cv.pointcloud_sceneflow_estimation.rcp_model"}, "('MODELS', 'video-text-retrieval', 'vop-retrieval-model')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vop_retrieval/model.py", "imports": ["torch", "os"], "module": "modelscope.models.cv.vop_retrieval.model"}, "('MODELS', 'video-text-retrieval', 'vop-retrieval-model-se')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vop_retrieval/model_se.py", "imports": ["torch", "os"], "module": "modelscope.models.cv.vop_retrieval.model_se"}, "('MODELS', 'video-object-detection', 'realtime-video-object-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/realtime_video_detector.py", "imports": ["logging", "tqdm", "numpy", "cv2", "time", "argparse", "os", "torch", "json"], "module": "modelscope.models.cv.stream_yolo.realtime_video_detector"}, "('MODELS', 'face-attribute-recognition', 'fairface')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_attribute_recognition/fair_face/face_attribute_recognition.py", "imports": ["PIL", "numpy", "cv2", "os", "torch", "torchvision"], "module": "modelscope.models.cv.face_attribute_recognition.fair_face.face_attribute_recognition"}, "('MODELS', 'video-depth-estimation', 'dro-resnet18-depth-estimation')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/dro_model.py", "imports": ["tqdm", "numpy", "cv2", "os", "glob", "torch"], "module": "modelscope.models.cv.video_depth_estimation.dro_model"}, "('MODELS', 'indoor-layout-estimation', 'panovit-layout-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/panovit.py", "imports": ["yacs", "numpy", "torch", "os"], "module": "modelscope.models.cv.indoor_layout_estimation.panovit"}, "('MODELS', 'video-object-detection', 'longshortnet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_streaming_perception/longshortnet/longshortnet.py", "imports": ["logging", "tqdm", "numpy", "cv2", "time", "argparse", "os", "torch", "json"], "module": "modelscope.models.cv.video_streaming_perception.longshortnet.longshortnet"}, "('MATCH_COST', 'default', 'MaskCost')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/track/mask_hungarian_assigner.py", "imports": ["scipy", "numpy", "mmdet", "torch"], "module": "modelscope.models.cv.video_instance_segmentation.track.mask_hungarian_assigner"}, "('BBOX_ASSIGNERS', 'default', 'MaskHungarianAssignerVideo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/track/mask_hungarian_assigner.py", "imports": ["scipy", "numpy", "mmdet", "torch"], "module": "modelscope.models.cv.video_instance_segmentation.track.mask_hungarian_assigner"}, "('HEADS', 'default', 'KernelUpdateHeadVideo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/track/kernel_update_head.py", "imports": ["mmcv", "numpy", "mmdet", "torch"], "module": "modelscope.models.cv.video_instance_segmentation.track.kernel_update_head"}, "('NECKS', 'default', 'MSDeformAttnPixelDecoder')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/neck/msdeformattn_decoder.py", "imports": ["mmcv", "mmdet", "torch"], "module": "modelscope.models.cv.video_instance_segmentation.neck.msdeformattn_decoder"}, "('TRANSFORMER_LAYER', 'default', 'KernelUpdator')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/kernel_updator.py", "imports": ["mmcv", "torch"], "module": "modelscope.models.cv.video_panoptic_segmentation.head.kernel_updator"}, "('HEADS', 'default', 'KernelIterHeadVideo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/head/kernel_iter_head.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.video_instance_segmentation.head.kernel_iter_head"}, "('HEADS', 'default', 'ConvKernelHeadVideo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/head/kernel_head.py", "imports": ["mmcv", "mmdet", "torch"], "module": "modelscope.models.cv.video_instance_segmentation.head.kernel_head"}, "('HEADS', 'default', 'KernelUpdateHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/head/kernel_update_head.py", "imports": ["mmcv", "numpy", "mmdet", "torch"], "module": "modelscope.models.cv.video_instance_segmentation.head.kernel_update_head"}, "('HEADS', 'default', 'KernelFrameIterHeadVideo')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/head/kernel_frame_iter_head.py", "imports": ["mmcv", "mmdet", "torch"], "module": "modelscope.models.cv.video_instance_segmentation.head.kernel_frame_iter_head"}, "('MODELS', 'video-instance-segmentation', 'swinb-video-instance-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/video_knet.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.video_instance_segmentation.video_knet"}, "('MODELS', 'image-color-enhancement', 'csrnet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_color_enhance/image_color_enhance.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_color_enhance.image_color_enhance"}, "('MODELS', 'image-color-enhancement', 'deeplpfnet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_color_enhance/deeplpf/deeplpf_image_color_enhance.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_color_enhance.deeplpf.deeplpf_image_color_enhance"}, "('MODELS', 'image-color-enhancement', 'adaint')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_color_enhance/adaint/adaint.py", "imports": ["os", "typing", "torch", "numbers", "torchvision"], "module": "modelscope.models.cv.image_color_enhance.adaint.adaint"}, "('MODELS', 'image-classification', 'bnext')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_binary_quant_classification/binary_quant_model.py", "imports": ["collections", "torch", "os"], "module": "modelscope.models.cv.image_binary_quant_classification.binary_quant_model"}, "('MODELS', 'referring-video-object-segmentation', 'swinT-referring-video-object-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/model.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.referring_video_object_segmentation.model"}, "('MODELS', 'nerf-recon-vq-compression', 'nerf-recon-vq-compression')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/nerf_recon_vq_compression.py", "imports": ["tqdm", "numpy", "cv2", "os", "time", "functools", "glob", "torch"], "module": "modelscope.models.cv.nerf_recon_vq_compression.nerf_recon_vq_compression"}, "('MODELS', 'bad-image-detecting', 'bad-image-detecting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/bad_image_detecting/bad_image_detecting.py", "imports": ["numpy", "os", "typing", "torch", "torchvision"], "module": "modelscope.models.cv.bad_image_detecting.bad_image_detecting"}, "('MODELS', 'image-paintbyexample', 'Stablediffusion-Paintbyexample')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_paintbyexample/model.py", "imports": ["omegaconf", "os", "paint_ldm", "typing", "torch"], "module": "modelscope.models.cv.image_paintbyexample.model"}, "('MODELS', 'video-object-segmentation', 'video-object-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/model.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.video_object_segmentation.model"}, "('MODELS', 'ocr-recognition', 'OCRRecognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/model.py", "imports": 
["torch", "os"], "module": "modelscope.models.cv.ocr_recognition.model"}, "('PREPROCESSORS', 'cv', 'ocr-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/preprocessor.py", "imports": ["PIL", "numpy", "cv2", "os", "torch"], "module": "modelscope.models.cv.ocr_recognition.preprocessor"}, "('MODELS', 'face-human-hand-detection', 'face-human-hand-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_human_hand_detection/det_infer.py", "imports": ["numpy", "torch", "cv2"], "module": "modelscope.models.cv.face_human_hand_detection.det_infer"}, "('MODELS', 'image-demoireing', 'image-restoration')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_restoration/image_restoration_model.py", "imports": ["numpy", "torch", "cv2", "os"], "module": "modelscope.models.cv.image_restoration.image_restoration_model"}, "('MODELS', 'video-human-matting', 'video-human-matting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_human_matting/model.py", "imports": ["numpy", "os", "typing", "torch", "torchvision"], "module": "modelscope.models.cv.video_human_matting.model"}, "('MODELS', 'human-detection', 'detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_model.py", "imports": ["numpy", "torch", "os"], "module": "modelscope.models.cv.object_detection.mmdet_model"}, "('MODELS', 'image-object-detection', 'detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_model.py", "imports": ["numpy", "torch", "os"], "module": "modelscope.models.cv.object_detection.mmdet_model"}, "('HEADS', 'default', 'FCNMaskNHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/roi_heads/mask_heads/fcn_mask_head.py", "imports": ["numpy", "mmdet", "torch", "warnings", "mmcv"], "module": "modelscope.models.cv.object_detection.mmdet_ms.roi_heads.mask_heads.fcn_mask_head"}, "('HEADS', 'default', 'ConvFCBBoxNHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/roi_heads/bbox_heads/convfc_bbox_head.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.object_detection.mmdet_ms.roi_heads.bbox_heads.convfc_bbox_head"}, "('HEADS', 'default', 'Shared2FCBBoxNHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/roi_heads/bbox_heads/convfc_bbox_head.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.object_detection.mmdet_ms.roi_heads.bbox_heads.convfc_bbox_head"}, "('HEADS', 'default', 'Shared4Conv1FCBBoxNHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/roi_heads/bbox_heads/convfc_bbox_head.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.object_detection.mmdet_ms.roi_heads.bbox_heads.convfc_bbox_head"}, "('HEADS', 'default', 'RPNNHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/dense_heads/rpn_head.py", "imports": ["mmcv", "copy", "mmdet", "torch"], "module": "modelscope.models.cv.object_detection.mmdet_ms.dense_heads.rpn_head"}, "('HEADS', 'default', 'AnchorNHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/dense_heads/anchor_head.py", "imports": ["mmdet"], "module": 
"modelscope.models.cv.object_detection.mmdet_ms.dense_heads.anchor_head"}, "('BACKBONES', 'default', 'ViT')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/backbones/vit.py", "imports": ["timm", "math", "functools", "mmdet", "torch"], "module": "modelscope.models.cv.object_detection.mmdet_ms.backbones.vit"}, "('NECKS', 'default', 'FPNF')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/necks/fpn.py", "imports": ["mmcv", "mmdet", "torch"], "module": "modelscope.models.cv.object_detection.mmdet_ms.necks.fpn"}, "('MODELS', 'image-segmentation', 'vision-middleware')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_middleware/model.py", "imports": ["typing", "json", "torch", "os"], "module": "modelscope.models.cv.vision_middleware.model"}, "('MODELS', 'controllable-image-generation', 'controllable-image-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/controlnet.py", "imports": ["control_ldm", "numpy", "PIL", "tempfile", "math", "os", "cv2", "einops", "random", "sys", "typing", "torch"], "module": "modelscope.models.cv.controllable_image_generation.controlnet"}, "('MODELS', 'image-denoising', 'nafnet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_denoise/nafnet_for_image_denoise.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_denoise.nafnet_for_image_denoise"}, "('MODELS', 'image-try-on', 'image-try-on')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_try_on/try_on_infer.py", "imports": ["PIL", "numpy", "cv2", "os", "argparse", "yaml", "torch", "torchvision"], "module": "modelscope.models.cv.image_try_on.try_on_infer"}, "('MODELS', 'image-colorization', 'ddcolor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/ddcolor/ddcolor_for_image_colorization.py", "imports": ["numpy", "copy", "os", "typing", "torch"], "module": "modelscope.models.cv.image_colorization.ddcolor.ddcolor_for_image_colorization"}, "('MODELS', 'shop-segmentation', 'shop-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/shop_segmentation/shop_seg_model.py", "imports": ["PIL", "numpy", "os", "typing", "torch", "json"], "module": "modelscope.models.cv.shop_segmentation.shop_seg_model"}, "('MODELS', 'language-guided-video-summarization', 'clip-it-language-guided-video-summarization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/language_guided_video_summarization/summarizer.py", "imports": ["numpy", "os", "argparse", "bmt_clipit", "typing", "torch", "videofeatures_clipit"], "module": "modelscope.models.cv.language_guided_video_summarization.summarizer"}, "('MODELS', 'video-frame-interpolation', 'video-frame-interpolation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/VFINet_for_video_frame_interpolation.py", "imports": ["typing", "copy", "torch", "os"], "module": "modelscope.models.cv.video_frame_interpolation.VFINet_for_video_frame_interpolation"}, "('MODELS', 'human-reconstruction', 'human-reconstruction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/Reconstruction.py", "imports": ["PIL", "numpy", "cv2", "os", "skimage", "typing", "torch", "torchvision"], "module": 
"modelscope.models.cv.human_reconstruction.Reconstruction"}, "('MODELS', 'image-classification', 'EasyRobustModel')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/robust_image_classification/easyrobust_model.py", "imports": ["torch", "os"], "module": "modelscope.models.cv.robust_image_classification.easyrobust_model"}, "('MODELS', 'image-debanding', 'rrdb')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_debanding/rrdb/rrdb_image_debanding.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_debanding.rrdb.rrdb_image_debanding"}, "('MODELS', 'vision-efficient-tuning', 'vision-efficient-tuning')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_efficient_tuning/model.py", "imports": ["typing", "torch"], "module": "modelscope.models.cv.vision_efficient_tuning.model"}, "('MODELS', 'panorama-depth-estimation', 'unifuse-depth-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/panorama_depth_estimation/unifuse_model.py", "imports": ["numpy", "torchvision", "torch", "os"], "module": "modelscope.models.cv.panorama_depth_estimation.unifuse_model"}, "('MODELS', 'image-object-detection', 'vidt')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vidt/model.py", "imports": ["torch", "os"], "module": "modelscope.models.cv.vidt.model"}, "('MODELS', 'image-quality-assessment-mos', 'image-quality-assessment-mos')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_mos/image_quality_assessment_mos.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_quality_assessment_mos.image_quality_assessment_mos"}, "('MODELS', 'video-stabilization', 'video-stabilization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUTRAFTStabilizer.py", "imports": ["numpy", "tempfile", "cv2", "math", "os", "sys", "typing", "torch"], "module": "modelscope.models.cv.video_stabilization.DUTRAFTStabilizer"}, "('MODELS', 'image-portrait-enhancement', 'gpen')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/image_portrait_enhancement.py", "imports": ["typing", "torch", "math", "os"], "module": "modelscope.models.cv.image_portrait_enhancement.image_portrait_enhancement"}, "('MODELS', 'facial-expression-recognition', 'fer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/facial_expression_recognition/fer/facial_expression_recognition.py", "imports": ["PIL", "numpy", "cv2", "os", "torch"], "module": "modelscope.models.cv.facial_expression_recognition.fer.facial_expression_recognition"}, "('MODELS', 'product-retrieval-embedding', 'product-retrieval-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/product_retrieval_embedding/item_model.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.models.cv.product_retrieval_embedding.item_model"}, "('MODELS', 'video-inpainting', 'video-inpainting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_inpainting/inpainting_model.py", "imports": ["numpy", "torch", "math", "torchvision"], "module": "modelscope.models.cv.video_inpainting.inpainting_model"}, "('MODELS', 'image-multi-view-depth-estimation', 'image-casmvs-depth-estimation')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_mvs_depth_estimation/casmvs_model.py", "imports": ["numpy", "cv2", "os", "torch", "easydict"], "module": "modelscope.models.cv.image_mvs_depth_estimation.casmvs_model"}, "('MODELS', 'image-object-detection', 'MaskScoring')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/abnormal_object_detection/mmdet_model.py", "imports": ["numpy", "torch", "os"], "module": "modelscope.models.cv.abnormal_object_detection.mmdet_model"}, "('ROI_EXTRACTORS', 'default', 'SingleRoINExtractor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/abnormal_object_detection/mmdet_ms/roi_head/roi_extractors/single_level_roi_extractor.py", "imports": ["mmcv", "mmdet", "torch"], "module": "modelscope.models.cv.abnormal_object_detection.mmdet_ms.roi_head.roi_extractors.single_level_roi_extractor"}, "('HEADS', 'default', 'MaskScoringNRoIHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/abnormal_object_detection/mmdet_ms/roi_head/mask_scoring_roi_head.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.abnormal_object_detection.mmdet_ms.roi_head.mask_scoring_roi_head"}, "('MODELS', 'image-body-reshaping', 'image-body-reshaping')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_body_reshaping/image_body_reshaping.py", "imports": ["numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.models.cv.image_body_reshaping.image_body_reshaping"}, "('MODELS', 'crowd-counting', 'HRNetCrowdCounting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/crowd_counting/cc_model.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.crowd_counting.cc_model"}, "('MODELS', 'image-classification', 'image-probing-model')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_probing_model/model.py", "imports": ["typing", "json", "torch", "os"], "module": "modelscope.models.cv.image_probing_model.model"}, "('MODELS', 'semantic-segmentation', 'detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/salient_detection/salient_model.py", "imports": ["PIL", "cv2", "os", "torch", "torchvision"], "module": "modelscope.models.cv.salient_detection.salient_model"}, "('MODELS', 'lineless-table-recognition', 'LoreModel')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/table_recognition/model_lore.py", "imports": ["numpy", "copy", "math", "os", "typing", "torch"], "module": "modelscope.models.cv.table_recognition.model_lore"}, "('MODELS', 'image-reid-person', 'passvitb')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_reid_person/pass_model.py", "imports": ["torch", "enum", "os"], "module": "modelscope.models.cv.image_reid_person.pass_model"}, "('MODELS', 'video-panoptic-segmentation', 'swinb-video-panoptic-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/video_k_net.py", "imports": ["mmcv", "numpy", "mmdet", "torch"], "module": "modelscope.models.cv.video_panoptic_segmentation.video_k_net"}, "('TRACKERS', 'default', 'QuasiDenseEmbedTracker')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/track/quasi_dense_embed_tracker.py", "imports": ["mmcv", "mmdet", "torch"], "module": 
"modelscope.models.cv.video_panoptic_segmentation.track.quasi_dense_embed_tracker"}, "('HEADS', 'default', 'VideoKernelIterHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/kernel_iter_head.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.video_panoptic_segmentation.head.kernel_iter_head"}, "('HEADS', 'default', 'VideoKernelUpdateHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/kernel_update_head.py", "imports": ["mmcv", "numpy", "mmdet", "torch"], "module": "modelscope.models.cv.video_panoptic_segmentation.head.kernel_update_head"}, "('NECKS', 'default', 'SemanticFPNWrapper')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/semantic_fpn_wrapper.py", "imports": ["mmcv", "mmdet", "torch"], "module": "modelscope.models.cv.video_panoptic_segmentation.head.semantic_fpn_wrapper"}, "('MODELS', 'face-recognition', 'rts-backbone')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_recognition/torchkit/rts_backbone.py", "imports": ["collections", "torch", "math", "os"], "module": "modelscope.models.cv.face_recognition.torchkit.rts_backbone"}, "('MODELS', 'body-3d-keypoints', 'body-3d-keypoints')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/cannonical_pose/body_3d_pose.py", "imports": ["logging", "numpy", "os", "typing", "torch"], "module": "modelscope.models.cv.body_3d_keypoints.cannonical_pose.body_3d_pose"}, "('MODELS', 'body-3d-keypoints', 'hdformer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/hdformer/hdformer_detector.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.models.cv.body_3d_keypoints.hdformer.hdformer_detector"}, "('MODELS', 'pedestrian-attribute-recognition', 'pedestrian-attribute-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/pedestrian_attribute_recognition/model.py", "imports": ["numpy", "torchvision", "torch", "os"], "module": "modelscope.models.cv.pedestrian_attribute_recognition.model"}, "('MODELS', 'image-segmentation', 'maskdino_swin')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/maskdino_model.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_instance_segmentation.maskdino_model"}, "('MODELS', 'image-segmentation', 'cascade_mask_rcnn_swin')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/model.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_instance_segmentation.model"}, "('MODELS', 'image-segmentation', 'fastinst')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/fastinst_model.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_instance_segmentation.fastinst_model"}, "('MODELS', 'image-depth-estimation', 'bts-depth-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation_bts/depth_estimation_bts_model.py", "imports": ["torch", "os"], "module": "modelscope.models.cv.image_depth_estimation_bts.depth_estimation_bts_model"}, "('MODELS', 'product-segmentation', 'product-segmentation')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/product_segmentation/seg_infer.py", "imports": ["PIL", "numpy", "torch", "cv2"], "module": "modelscope.models.cv.product_segmentation.seg_infer"}, "('PREPROCESSORS', 'cv', 'image-driving-perception-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_driving_perception/preprocessor.py", "imports": ["numpy", "typing", "torch", "cv2"], "module": "modelscope.models.cv.image_driving_perception.preprocessor"}, "('MODELS', 'image-driving-perception', 'yolopv2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_driving_perception/image_driving_percetion_model.py", "imports": ["numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.models.cv.image_driving_perception.image_driving_percetion_model"}, "('MODELS', 'face-detection', 'scrfd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/scrfd_detect.py", "imports": ["numpy", "copy", "os", "typing", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.scrfd_detect"}, "('MODELS', 'card-detection', 'scrfd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/scrfd_detect.py", "imports": ["numpy", "copy", "os", "typing", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.scrfd_detect"}, "('MODELS', 'face-detection', 'tinymog')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/tinymog_detect.py", "imports": ["typing", "copy", "torch", "os"], "module": "modelscope.models.cv.face_detection.scrfd.tinymog_detect"}, "('PREPROCESSORS', 'cv', 'object-detection-scrfd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/preprocessor.py", "imports": ["PIL", "numpy", "typing"], "module": "modelscope.models.cv.face_detection.scrfd.preprocessor"}, "('MODELS', 'face-detection', 'damofd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/damofd_detect.py", "imports": ["typing", "copy", "torch", "os"], "module": "modelscope.models.cv.face_detection.scrfd.damofd_detect"}, "('DETECTORS', 'default', 'CustomSingleStageDetector')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/detectors/single_stage.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.detectors.single_stage"}, "('DETECTORS', 'default', 'TinyMog')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/detectors/tinymog.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.detectors.tinymog"}, "('DETECTORS', 'default', 'SCRFD')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/detectors/scrfd.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.detectors.scrfd"}, "('HEADS', 'default', 'SCRFDHead')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/dense_heads/scrfd_head.py", "imports": ["mmcv", "numpy", "mmdet", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.dense_heads.scrfd_head"}, "('BACKBONES', 'default', 'MasterNet')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/backbones/master_net.py", "imports": ["mmdet", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.backbones.master_net"}, "('BACKBONES', 'default', 'ResNetV1e')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/backbones/resnet.py", "imports": ["mmcv", "mmdet", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.backbones.resnet"}, "('BACKBONES', 'default', 'MobileNetV1')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/backbones/mobilenet.py", "imports": ["mmcv", "mmdet", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.backbones.mobilenet"}, "('PIPELINES', 'default', 'DefaultFormatBundleV2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/formating.py", "imports": ["mmcv", "numpy", "mmdet", "torch"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.formating"}, "('PIPELINES', 'default', 'RotateV2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/auto_augment.py", "imports": ["numpy", "copy", "mmdet", "cv2", "mmcv"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.auto_augment"}, "('PIPELINES', 'default', 'LoadAnnotationsV2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/loading.py", "imports": ["numpy", "mmdet", "pycocotools", "os"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.loading"}, "('PIPELINES', 'default', 'ResizeV2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/transforms.py", "imports": ["mmcv", "numpy", "mmdet"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.transforms"}, "('PIPELINES', 'default', 'RandomFlipV2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/transforms.py", "imports": ["mmcv", "numpy", "mmdet"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.transforms"}, "('PIPELINES', 'default', 'RandomSquareCrop')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/transforms.py", "imports": ["mmcv", "numpy", "mmdet"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.transforms"}, "('DATASETS', 'default', 'RetinaFaceDataset')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/retinaface.py", "imports": ["numpy", "mmdet"], "module": "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.retinaface"}, "('MODELS', 'face-detection', 'mogface')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mogface/models/detectors.py", "imports": ["numpy", "torch", "cv2", "os"], "module": "modelscope.models.cv.face_detection.mogface.models.detectors"}, "('MODELS', 'face-detection', 'mtcnn')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mtcnn/models/detector.py", "imports": ["PIL", "numpy", "torch", "os"], "module": "modelscope.models.cv.face_detection.mtcnn.models.detector"}, "('MODELS', 'face-detection', 'ulfd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/detection.py", "imports": ["numpy", "torch", "cv2", "os"], "module": "modelscope.models.cv.face_detection.ulfd_slim.detection"}, "('MODELS', 'face-detection', 'retinaface')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/retinaface/detection.py", "imports": ["numpy", "torch", "cv2"], "module": "modelscope.models.cv.face_detection.retinaface.detection"}, "('MODELS', 'video-deinterlace', 'video-deinterlace')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_deinterlace/UNet_for_video_deinterlace.py", "imports": ["typing", "copy", "torch", "os"], "module": "modelscope.models.cv.video_deinterlace.UNet_for_video_deinterlace"}, "('MODELS', 'face-2d-keypoints', 'flc')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/facial_landmark_confidence/flc/facial_landmark_confidence.py", "imports": ["PIL", "numpy", "cv2", "os", "torch"], "module": "modelscope.models.cv.facial_landmark_confidence.flc.facial_landmark_confidence"}, "('MODELS', 'image-object-detection', 'tinynas-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/tinynas_detector.py", "imports": [], "module": "modelscope.models.cv.tinynas_detection.tinynas_detector"}, "('MODELS', 'domain-specific-object-detection', 'tinynas-damoyolo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/tinynas_damoyolo.py", "imports": [], "module": "modelscope.models.cv.tinynas_detection.tinynas_damoyolo"}, "('MODELS', 'image-object-detection', 'tinynas-damoyolo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/tinynas_damoyolo.py", "imports": [], "module": "modelscope.models.cv.tinynas_detection.tinynas_damoyolo"}, "('MODELS', 'face-emotion', 'face-emotion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_emotion/emotion_model.py", "imports": ["sys", "torch", "os"], "module": "modelscope.models.cv.face_emotion.emotion_model"}, "('MODELS', 'image-segmentation', 'm2fp')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_human_parsing/m2fp_net.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.image_human_parsing.m2fp_net"}, "('MODELS', 'image-super-resolution', 'ecbsr')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/super_resolution/ecbsr_model.py", "imports": ["typing", "torch", "os"], "module": "modelscope.models.cv.super_resolution.ecbsr_model"}, "('MODELS', 'panorama-depth-estimation', 's2net-depth-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/s2net_panorama_depth_estimation/s2net_model.py", "imports": ["numpy", "torchvision", "torch", "os"], "module": "modelscope.models.cv.s2net_panorama_depth_estimation.s2net_model"}, "('MODELS', 'ocr-detection', 'OCRDetection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/model.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.models.cv.ocr_detection.model"}, "('PREPROCESSORS', 'cv', 
'ocr-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/preprocessor.py", "imports": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch"], "module": "modelscope.models.cv.ocr_detection.preprocessor"}, "('MODELS', 'protein-structure', 'unifold')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/model.py", "imports": ["argparse", "typing", "torch", "os"], "module": "modelscope.models.science.unifold.model"}, "('METRICS', 'default', 'image-ins-seg-coco-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_instance_segmentation_metric.py", "imports": ["numpy", "pycocotools", "tempfile", "os", "typing", "collections"], "module": "modelscope.metrics.image_instance_segmentation_metric"}, "('METRICS', 'default', 'ppl')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/ppl_metric.py", "imports": ["numpy", "typing", "torch", "math"], "module": "modelscope.metrics.ppl_metric"}, "('METRICS', 'default', 'loss-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/loss_metric.py", "imports": ["numpy", "sklearn", "typing"], "module": "modelscope.metrics.loss_metric"}, "('METRICS', 'default', 'image-inpainting-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_inpainting_metric.py", "imports": ["scipy", "numpy", "torch", "typing"], "module": "modelscope.metrics.image_inpainting_metric"}, "('METRICS', 'default', 'mAP')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/map_metric.py", "imports": ["numpy", "typing"], "module": "modelscope.metrics.map_metric"}, "('METRICS', 'default', 'video-super-resolution-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_super_resolution_metric/video_super_resolution_metric.py", "imports": ["numpy", "typing"], "module": "modelscope.metrics.video_super_resolution_metric.video_super_resolution_metric"}, "('METRICS', 'default', 'token-cls-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/token_classification_metric.py", "imports": ["importlib", "numpy", "typing"], "module": "modelscope.metrics.token_classification_metric"}, "('METRICS', 'default', 'video-frame-interpolation-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_frame_interpolation_metric.py", "imports": ["numpy", "lpips", "math", "typing", "torch"], "module": "modelscope.metrics.video_frame_interpolation_metric"}, "('METRICS', 'default', 'text-gen-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/text_generation_metric.py", "imports": ["nltk", "contextlib", "rouge", "sys", "typing"], "module": "modelscope.metrics.text_generation_metric"}, "('METRICS', 'default', 'movie-scene-segmentation-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/movie_scene_segmentation_metric.py", "imports": ["numpy", "typing"], "module": "modelscope.metrics.movie_scene_segmentation_metric"}, "('METRICS', 'default', 'ned')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/ned_metric.py", "imports": ["numpy", "typing"], "module": "modelscope.metrics.ned_metric"}, "('METRICS', 'default', 'image-portrait-enhancement-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_portrait_enhancement_metric.py", "imports": ["numpy", "typing", "cv2"], "module": 
"modelscope.metrics.image_portrait_enhancement_metric"}, "('METRICS', 'default', 'seq-cls-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/sequence_classification_metric.py", "imports": ["numpy", "sklearn", "typing"], "module": "modelscope.metrics.sequence_classification_metric"}, "('METRICS', 'default', 'image-denoise-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_denoise_metric.py", "imports": ["numpy", "typing", "torch", "cv2"], "module": "modelscope.metrics.image_denoise_metric"}, "('METRICS', 'default', 'translation-evaluation-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/translation_evaluation_metric.py", "imports": ["importlib", "typing", "pandas"], "module": "modelscope.metrics.translation_evaluation_metric"}, "('METRICS', 'default', 'image-colorization-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_colorization_metric.py", "imports": ["numpy", "cv2", "typing", "torch", "scipy", "torchvision"], "module": "modelscope.metrics.image_colorization_metric"}, "('METRICS', 'default', 'text-ranking-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/text_ranking_metric.py", "imports": ["numpy", "typing"], "module": "modelscope.metrics.text_ranking_metric"}, "('METRICS', 'default', 'audio-noise-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/audio_noise_metric.py", "imports": ["typing"], "module": "modelscope.metrics.audio_noise_metric"}, "('METRICS', 'default', 'accuracy')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/accuracy_metric.py", "imports": ["numpy", "typing"], "module": "modelscope.metrics.accuracy_metric"}, "('METRICS', 'default', 'image-quality-assessment-degradation-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_quality_assessment_degradation_metric.py", "imports": ["scipy", "tqdm", "numpy", "tempfile", "cv2", "os", "sys", "typing", "torch", "collections"], "module": "modelscope.metrics.image_quality_assessment_degradation_metric"}, "('METRICS', 'default', 'image-color-enhance-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_color_enhance_metric.py", "imports": ["numpy", "typing", "cv2"], "module": "modelscope.metrics.image_color_enhance_metric"}, "('METRICS', 'default', 'bleu')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/bleu_metric.py", "imports": ["typing", "sacrebleu", "itertools"], "module": "modelscope.metrics.bleu_metric"}, "('METRICS', 'default', 'prediction-saving-wrapper')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/prediction_saving_wrapper.py", "imports": ["numpy", "sklearn", "typing"], "module": "modelscope.metrics.prediction_saving_wrapper"}, "('METRICS', 'default', 'image-quality-assessment-mos-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_quality_assessment_mos_metric.py", "imports": ["tqdm", "numpy", "tempfile", "cv2", "os", "sys", "typing", "torch", "scipy"], "module": "modelscope.metrics.image_quality_assessment_mos_metric"}, "('METRICS', 'default', 'referring-video-object-segmentation-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/referring_video_object_segmentation_metric.py", "imports": ["tqdm", "numpy", "pycocotools", "typing", "torch"], "module": 
"modelscope.metrics.referring_video_object_segmentation_metric"}, "('METRICS', 'default', 'ocr-recognition-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/ocr_recognition_metric.py", "imports": ["edit_distance", "numpy", "torch", "typing"], "module": "modelscope.metrics.ocr_recognition_metric"}, "('METRICS', 'default', 'video-stabilization-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_stabilization_metric.py", "imports": ["tqdm", "numpy", "tempfile", "cv2", "os", "sys", "typing"], "module": "modelscope.metrics.video_stabilization_metric"}, "('METRICS', 'default', 'inbatch_recall')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/inbatch_recall_metric.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.metrics.inbatch_recall_metric"}, "('METRICS', 'default', 'video-summarization-metric')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_summarization_metric.py", "imports": ["numpy", "typing"], "module": "modelscope.metrics.video_summarization_metric"}, "('PIPELINES', 'task-oriented-conversation', 'dialog-state-tracking')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/dialog_state_tracking_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.dialog_state_tracking_pipeline"}, "('PIPELINES', 'text-generation', 'gpt3-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/distributed_gpt3_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.distributed_gpt3_pipeline"}, "('PIPELINES', 'code-translation', 'codegeex-code-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/codegeex_code_translation_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.codegeex_code_translation_pipeline"}, "('PIPELINES', 'siamese-uie', 'siamese-uie')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/siamese_uie_pipeline.py", "imports": ["logging", "tqdm", "copy", "math", "os", "time", "typing", "pathlib", "torch", "scipy", "json"], "module": "modelscope.pipelines.nlp.siamese_uie_pipeline"}, "('PIPELINES', 'text-summarization', 'text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/summarization_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.summarization_pipeline"}, "('PIPELINES', 'task-oriented-conversation', 'dialog-modeling')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/dialog_modeling_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.dialog_modeling_pipeline"}, "('PIPELINES', 'zero-shot-classification', 'zero-shot-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/zero_shot_classification_pipeline.py", "imports": ["scipy", "typing", "torch"], "module": "modelscope.pipelines.nlp.zero_shot_classification_pipeline"}, "('PIPELINES', 'translation-evaluation', 'translation-evaluation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/translation_evaluation_pipeline.py", "imports": ["numpy", "os", "typing", "torch", "enum"], "module": "modelscope.pipelines.nlp.translation_evaluation_pipeline"}, "('PIPELINES', 'fid-dialogue', 'fid-dialogue')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/fid_dialogue_pipeline.py", "imports": ["re", "typing", "torch"], "module": "modelscope.pipelines.nlp.fid_dialogue_pipeline"}, "('PIPELINES', 'document-grounded-dialog-rerank', 'document-grounded-dialog-rerank')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/document_grounded_dialog_rerank_pipeline.py", "imports": ["numpy", "os", "time", "pprint", "re", "random", "sys", "transformers", "typing", "torch", "collections", "ujson"], "module": "modelscope.pipelines.nlp.document_grounded_dialog_rerank_pipeline"}, "('PIPELINES', 'text-classification', 'sentiment-analysis')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.text_classification_pipeline"}, "('PIPELINES', 'nli', 'nli')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.text_classification_pipeline"}, "('PIPELINES', 'sentence-similarity', 'sentence-similarity')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.text_classification_pipeline"}, "('PIPELINES', 'text-classification', 'text-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.text_classification_pipeline"}, "('PIPELINES', 'text-classification', 'sentiment-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.text_classification_pipeline"}, "('PIPELINES', 'text-classification', 'sentence-similarity')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.text_classification_pipeline"}, "('PIPELINES', 'sentiment-classification', 'sentiment-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.text_classification_pipeline"}, "('PIPELINES', 'document-grounded-dialog-generate', 'document-grounded-dialog-generate')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/document_grounded_dialog_generate_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.document_grounded_dialog_generate_pipeline"}, "('PIPELINES', 'machine-reading-comprehension', 'machine-reading-comprehension-for-ner')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/machine_reading_comprehension_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.machine_reading_comprehension_pipeline"}, "('PIPELINES', 'token-classification', 'token-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/token_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.token_classification_pipeline"}, "('PIPELINES', 
'token-classification', 'part-of-speech')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/token_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.token_classification_pipeline"}, "('PIPELINES', 'token-classification', 'word-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/token_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.token_classification_pipeline"}, "('PIPELINES', 'token-classification', 'named-entity-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/token_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.token_classification_pipeline"}, "('PIPELINES', 'part-of-speech', 'part-of-speech')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/token_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.token_classification_pipeline"}, "('PIPELINES', 'translation', 'csanmt-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/translation_pipeline.py", "imports": ["numpy", "os", "tensorflow", "typing", "subword_nmt", "jieba", "sacremoses"], "module": "modelscope.pipelines.nlp.translation_pipeline"}, "('PIPELINES', 'sentence-embedding', 'sentence-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/sentence_embedding_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.sentence_embedding_pipeline"}, "('PIPELINES', 'faq-question-answering', 'faq-question-answering')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/faq_question_answering_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.faq_question_answering_pipeline"}, "('PIPELINES', 'feature-extraction', 'feature-extraction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/feature_extraction_pipeline.py", "imports": ["typing", "torch", "os"], "module": "modelscope.pipelines.nlp.feature_extraction_pipeline"}, "('PIPELINES', 'text-generation', 'gpt-moe-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/distributed_gpt_moe_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.distributed_gpt_moe_pipeline"}, "('PIPELINES', 'text-generation', 'text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.pipelines.nlp.text_generation_pipeline"}, "('PIPELINES', 'text2text-generation', 'translation_en_to_de')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.pipelines.nlp.text_generation_pipeline"}, "('PIPELINES', 'text2text-generation', 'translation_en_to_ro')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.pipelines.nlp.text_generation_pipeline"}, "('PIPELINES', 'text2text-generation', 'translation_en_to_fr')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.pipelines.nlp.text_generation_pipeline"}, "('PIPELINES', 'text2text-generation', 'text2text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.pipelines.nlp.text_generation_pipeline"}, "('PIPELINES', 'chat', 'chatglm6b-text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.pipelines.nlp.text_generation_pipeline"}, "('PIPELINES', 'chat', 'chatglm2_6b-text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.pipelines.nlp.text_generation_pipeline"}, "('PIPELINES', 'chat', 'qwen-chat')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.pipelines.nlp.text_generation_pipeline"}, "('PIPELINES', 'text-generation', 'qwen-text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.pipelines.nlp.text_generation_pipeline"}, "('PIPELINES', 'table-question-answering', 'conversational-text-to-sql')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/conversational_text_to_sql_pipeline.py", "imports": ["text2sql_lgesql", "typing", "torch"], "module": "modelscope.pipelines.nlp.conversational_text_to_sql_pipeline"}, "('PIPELINES', 'text-generation', 'llama2-text-generation-pipeline')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/llama2_text_generation_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.llama2_text_generation_pipeline"}, "('PIPELINES', 'competency-aware-translation', 'canmt-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/canmt_translation_pipeline.py", "imports": ["sacremoses", "typing", "torch", "os"], "module": "modelscope.pipelines.nlp.canmt_translation_pipeline"}, "('PIPELINES', 'information-extraction', 'relation-extraction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/information_extraction_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.information_extraction_pipeline"}, "('PIPELINES', 'relation-extraction', 'relation-extraction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/information_extraction_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.information_extraction_pipeline"}, "('PIPELINES', 'extractive-summarization', 'extractive-summarization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/extractive_summarization_pipeline.py", "imports": ["numpy", "datasets", "re", "typing", "torch"], "module": "modelscope.pipelines.nlp.extractive_summarization_pipeline"}, "('PIPELINES', 'text-classification', 'domain-classification')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/fasttext_text_classification_pipeline.py", "imports": ["numpy", "os", "sentencepiece", "typing", "fasttext"], "module": "modelscope.pipelines.nlp.fasttext_text_classification_pipeline"}, "('PIPELINES', 'table-question-answering', 'table-question-answering-pipeline')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/table_question_answering_pipeline.py", "imports": ["os", "typing", "transformers", "torch", "json"], "module": "modelscope.pipelines.nlp.table_question_answering_pipeline"}, "('PIPELINES', 'text-generation', 'glm130b-text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/glm130b_text_generation_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.glm130b_text_generation_pipeline"}, "('PIPELINES', 'fill-mask', 'fill-mask')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/fill_mask_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.nlp.fill_mask_pipeline"}, "('PIPELINES', 'fill-mask', 'fill-mask-ponet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/fill_mask_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.nlp.fill_mask_pipeline"}, "('PIPELINES', 'translation', 'automatic-post-editing')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/automatic_post_editing_pipeline.py", "imports": ["numpy", "html", "os", "sentencepiece", "tensorflow", "typing", "jieba", "sacremoses"], "module": "modelscope.pipelines.nlp.automatic_post_editing_pipeline"}, "('PIPELINES', 'document-segmentation', 'document-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/document_segmentation_pipeline.py", "imports": ["numpy", "datasets", "re", "typing", "torch"], "module": "modelscope.pipelines.nlp.document_segmentation_pipeline"}, "('PIPELINES', 'word-alignment', 'word-alignment')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/word_alignment_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.nlp.word_alignment_pipeline"}, "('PIPELINES', 'sentence-similarity', 'translation-quality-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/translation_quality_estimation_pipeline.py", "imports": ["os", "typing", "transformers", "torch", "io"], "module": "modelscope.pipelines.nlp.translation_quality_estimation_pipeline"}, "('PIPELINES', 'code-generation', 'codegeex-code-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/codegeex_code_generation_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.codegeex_code_generation_pipeline"}, "('PIPELINES', 'text-generation', 'polylm-text-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/polylm_text_generation_pipeline.py", "imports": ["typing", "torch", "os"], "module": "modelscope.pipelines.nlp.polylm_text_generation_pipeline"}, "('PIPELINES', 'text-generation', 'plug-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/distributed_plug_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.distributed_plug_pipeline"}, "('PIPELINES', 'text-ranking', 'text-ranking')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_ranking_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.nlp.text_ranking_pipeline"}, "('PIPELINES', 'text-classification', 'language_identification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/language_identification_pipline.py", "imports": ["numpy", "os", "re", "tensorflow", "typing"], "module": "modelscope.pipelines.nlp.language_identification_pipline"}, "('PIPELINES', 'task-oriented-conversation', 'dialog-intent-prediction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/dialog_intent_prediction_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.dialog_intent_prediction_pipeline"}, "('PIPELINES', 'word-segmentation', 'word-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/word_segmentation_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.word_segmentation_pipeline"}, "('PIPELINES', 'word-segmentation', 'multilingual-word-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/word_segmentation_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.word_segmentation_pipeline"}, "('PIPELINES', 'word-segmentation', 'word-segmentation-thai')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/word_segmentation_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.word_segmentation_pipeline"}, "('PIPELINES', 'text-error-correction', 'text-error-correction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_error_correction_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.nlp.text_error_correction_pipeline"}, "('PIPELINES', 'text-summarization', 'mglm-text-summarization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/mglm_text_summarization_pipeline.py", "imports": ["typing", "os"], "module": "modelscope.pipelines.nlp.mglm_text_summarization_pipeline"}, "('PIPELINES', 'named-entity-recognition', 'named-entity-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/named_entity_recognition_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.named_entity_recognition_pipeline"}, "('PIPELINES', 'named-entity-recognition', 'named-entity-recognition-thai')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/named_entity_recognition_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.named_entity_recognition_pipeline"}, "('PIPELINES', 'named-entity-recognition', 'named-entity-recognition-viet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/named_entity_recognition_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.nlp.named_entity_recognition_pipeline"}, "('PIPELINES', 'document-grounded-dialog-retrieval', 'document-grounded-dialog-retrieval')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/document_grounded_dialog_retrieval_pipeline.py", "imports": ["numpy", "os", "typing", "json", "faiss"], "module": "modelscope.pipelines.nlp.document_grounded_dialog_retrieval_pipeline"}, "('PIPELINES', 'translation', 'interactive-translation')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/interactive_translation_pipeline.py", "imports": ["numpy", "os", "tensorflow", "typing", "subword_nmt", "jieba", "sacremoses"], "module": "modelscope.pipelines.nlp.interactive_translation_pipeline"}, "('PIPELINES', 'text-classification', 'user-satisfaction-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/user_satisfaction_estimation_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.nlp.user_satisfaction_estimation_pipeline"}, "('PIPELINES', 'image-text-retrieval', 'image-text-retrieval')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/image_text_retrieval_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.image_text_retrieval_pipeline"}, "('PIPELINES', 'video-temporal-grounding', 'soonet-video-temporal-grounding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/soonet_video_temporal_grounding_pipeline.py", "imports": ["numpy", "os", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.multi_modal.soonet_video_temporal_grounding_pipeline"}, "('PIPELINES', 'video-multi-modal-embedding', 'video-multi-modal-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/video_multi_modal_embedding_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.multi_modal.video_multi_modal_embedding_pipeline"}, "('PIPELINES', 'multimodal-dialogue', 'multimodal-dialogue')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/multimodal_dialogue_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.multimodal_dialogue_pipeline"}, "('PIPELINES', 'document-vl-embedding', 'document-vl-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/document_vl_embedding_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.document_vl_embedding_pipeline"}, "('PIPELINES', 'text-to-video-synthesis', 'videocomposer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/videocomposer_pipeline.py", "imports": ["PIL", "numpy", "tempfile", "cv2", "time", "os", "imageio", "functools", "subprocess", "random", "typing", "torch", "mvextractor", "torchvision"], "module": "modelscope.pipelines.multi_modal.videocomposer_pipeline"}, "('PIPELINES', 'visual-question-answering', 'gridvlp-multi-modal-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/gridvlp_pipeline.py", "imports": ["PIL", "numpy", "time", "os", "traceback", "typing", "transformers", "torch", "json"], "module": "modelscope.pipelines.multi_modal.gridvlp_pipeline"}, "('PIPELINES', 'multi-modal-embedding', 'gridvlp-multi-modal-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/gridvlp_pipeline.py", "imports": ["PIL", "numpy", "time", "os", "traceback", "typing", "transformers", "torch", "json"], "module": "modelscope.pipelines.multi_modal.gridvlp_pipeline"}, "('PIPELINES', 'text2sql', 'ofa-text2sql')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/text2sql_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.text2sql_pipeline"}, "('PIPELINES', 'image-to-video', 
'image-to-video-task-pipeline')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/image_to_video_pipeline.py", "imports": ["tempfile", "cv2", "os", "einops", "subprocess", "typing", "torch"], "module": "modelscope.pipelines.multi_modal.image_to_video_pipeline"}, "('PIPELINES', 'image-text-retrieval', 'multi-modal-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/multi_modal_embedding_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.multi_modal.multi_modal_embedding_pipeline"}, "('PIPELINES', 'multi-modal-embedding', 'multi-modal-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/multi_modal_embedding_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.multi_modal.multi_modal_embedding_pipeline"}, "('PIPELINES', 'text-to-image-synthesis', 'chinese-stable-diffusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/chinese_stable_diffusion_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "transformers", "torch", "diffusers"], "module": "modelscope.pipelines.multi_modal.diffusers_wrapped.stable_diffusion.chinese_stable_diffusion_pipeline"}, "('PIPELINES', 'text-to-image-synthesis', 'diffusers-stable-diffusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "diffusers", "torchvision"], "module": "modelscope.pipelines.multi_modal.diffusers_wrapped.stable_diffusion.stable_diffusion_pipeline"}, "('PIPELINES', 'efficient-diffusion-tuning', 'efficient-diffusion-tuning')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/efficient_diffusion_tuning_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.multi_modal.efficient_diffusion_tuning_pipeline"}, "('PIPELINES', 'image-captioning', 'image-captioning')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/image_captioning_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.multi_modal.image_captioning_pipeline"}, "('PIPELINES', 'visual-entailment', 'visual-entailment')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/visual_entailment_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.visual_entailment_pipeline"}, "('PIPELINES', 'multi-modal-similarity', 'multi-modal-similarity')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/team_multi_modal_similarity_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.multi_modal.team_multi_modal_similarity_pipeline"}, "('PIPELINES', 'text-to-image-synthesis', 'text-to-image-synthesis')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/text_to_image_synthesis_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.text_to_image_synthesis_pipeline"}, "('PIPELINES', 'ocr-recognition', 'ofa-ocr-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/ocr_recognition_pipeline.py", "imports": ["typing", "torch"], "module": 
"modelscope.pipelines.multi_modal.ocr_recognition_pipeline"}, "('PIPELINES', 'text-ranking', 'mgeo-ranking')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/mgeo_ranking_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.multi_modal.mgeo_ranking_pipeline"}, "('PIPELINES', 'video-to-video', 'video-to-video-pipeline')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/video_to_video_pipeline.py", "imports": ["tempfile", "cv2", "os", "einops", "subprocess", "typing", "torch"], "module": "modelscope.pipelines.multi_modal.video_to_video_pipeline"}, "('PIPELINES', 'text-to-image-synthesis', 'disco_guided_diffusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/disco_guided_diffusion_pipeline/disco_guided_diffusion.py", "imports": ["importlib", "numpy", "PIL", "math", "os", "gc", "cv2", "clip", "torch", "json", "torchvision"], "module": "modelscope.pipelines.multi_modal.disco_guided_diffusion_pipeline.disco_guided_diffusion"}, "('PIPELINES', 'generative-multi-modal-embedding', 'generative-multi-modal-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/generative_multi_modal_embedding_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.multi_modal.generative_multi_modal_embedding_pipeline"}, "('PIPELINES', 'visual-question-answering', 'visual-question-answering')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/visual_question_answering_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.visual_question_answering_pipeline"}, "('PIPELINES', 'text-to-video-synthesis', 'latent-text-to-video-synthesis')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py", "imports": ["einops", "tempfile", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.multi_modal.text_to_video_synthesis_pipeline"}, "('PIPELINES', 'sudoku', 'ofa-sudoku')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/sudoku_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.sudoku_pipeline"}, "('PIPELINES', 'video-question-answering', 'video-question-answering')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/video_question_answering_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.video_question_answering_pipeline"}, "('PIPELINES', 'auto-speech-recognition', 'ofa-asr')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/asr_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.asr_pipeline"}, "('PIPELINES', 'visual-grounding', 'visual-grounding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/visual_grounding_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.visual_grounding_pipeline"}, "('PIPELINES', 'video-captioning', 'video-captioning')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/video_captioning_pipeline.py", "imports": ["typing", "torch"], "module": "modelscope.pipelines.multi_modal.video_captioning_pipeline"}, "('PIPELINES', 'speaker-verification', 'sv-inference')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_verification_pipeline.py", "imports": ["shutil", "typing", "yaml", "os"], "module": "modelscope.pipelines.audio.speaker_verification_pipeline"}, "('PIPELINES', 'speech-timestamp', 'speech-timestamp-inference')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/timestamp_pipeline.py", "imports": ["os", "typing", "yaml", "json", "funasr"], "module": "modelscope.pipelines.audio.timestamp_pipeline"}, "('PIPELINES', 'speech-language-recognition', 'speech-language-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/language_recognition_pipeline.py", "imports": ["numpy", "soundfile", "os", "torchaudio", "typing", "torch", "io"], "module": "modelscope.pipelines.audio.language_recognition_pipeline"}, "('PIPELINES', 'speaker-diarization-dialogue-detection', 'speaker-diarization-dialogue-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_diarization_dialogue_detection_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.audio.speaker_diarization_dialogue_detection_pipeline"}, "('PIPELINES', 'keyword-spotting', 'speech_dfsmn_kws_char_farfield')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/kws_farfield_pipeline.py", "imports": ["numpy", "soundfile", "typing", "io", "wave"], "module": "modelscope.pipelines.audio.kws_farfield_pipeline"}, "('PIPELINES', 'language-score-prediction', 'language-score-prediction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/lm_infer_pipeline.py", "imports": ["typing", "os"], "module": "modelscope.pipelines.audio.lm_infer_pipeline"}, "('PIPELINES', 'speaker-diarization', 'speaker-change-locating')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_change_locating_pipeline.py", "imports": ["numpy", "soundfile", "torchaudio", "typing", "torch", "io"], "module": "modelscope.pipelines.audio.speaker_change_locating_pipeline"}, "('PIPELINES', 'auto-speech-recognition', 'asr-wenet-inference')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/asr_wenet_inference_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.audio.asr_wenet_inference_pipeline"}, "('PIPELINES', 'acoustic-noise-suppression', 'speech_frcrn_ans_cirm_16k')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/ans_pipeline.py", "imports": ["numpy", "soundfile", "typing", "torch", "io", "librosa"], "module": "modelscope.pipelines.audio.ans_pipeline"}, "('PIPELINES', 'speaker-verification', 'speaker-verification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_verification_light_pipeline.py", "imports": ["numpy", "soundfile", "os", "torchaudio", "typing", "torch", "io"], "module": "modelscope.pipelines.audio.speaker_verification_light_pipeline"}, "('PIPELINES', 'auto-speech-recognition', 'asr-inference')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/asr_inference_pipeline.py", "imports": ["typing", "json", "yaml", "os"], "module": "modelscope.pipelines.audio.asr_inference_pipeline"}, "('PIPELINES', 'acoustic-noise-suppression', 'speech_dfsmn_ans_psm_48k_causal')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/ans_dfsmn_pipeline.py", "imports": ["numpy", "soundfile", "os", "sys", "typing", 
"torch", "io", "collections", "librosa"], "module": "modelscope.pipelines.audio.ans_dfsmn_pipeline"}, "('PIPELINES', 'text-to-speech', 'sambert-hifigan-tts')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/text_to_speech_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.audio.text_to_speech_pipeline"}, "('PIPELINES', 'inverse-text-processing', 'itn-inference')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/inverse_text_processing_pipeline.py", "imports": ["shutil", "typing", "yaml", "os"], "module": "modelscope.pipelines.audio.inverse_text_processing_pipeline"}, "('PIPELINES', 'speaker-diarization-semantic-speaker-turn-detection', 'speaker-diarization-semantic-speaker-turn-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_diarization_semantic_speaker_turn_detection_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.audio.speaker_diarization_semantic_speaker_turn_detection_pipeline"}, "('PIPELINES', 'keyword-spotting', 'kws-kwsbp')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/kws_kwsbp_pipeline.py", "imports": ["typing", "json", "os"], "module": "modelscope.pipelines.audio.kws_kwsbp_pipeline"}, "('PIPELINES', 'punctuation', 'punc-inference')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/punctuation_processing_pipeline.py", "imports": ["shutil", "typing", "yaml", "os"], "module": "modelscope.pipelines.audio.punctuation_processing_pipeline"}, "('PIPELINES', 'speaker-diarization', 'segmentation-clustering')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/segmentation_clustering_pipeline.py", "imports": ["numpy", "soundfile", "torchaudio", "typing", "torch", "io"], "module": "modelscope.pipelines.audio.segmentation_clustering_pipeline"}, "('PIPELINES', 'speech-separation', 'speech-separation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/separation_pipeline.py", "imports": ["numpy", "soundfile", "typing", "torch", "io"], "module": "modelscope.pipelines.audio.separation_pipeline"}, "('PIPELINES', 'speaker-verification', 'speaker-verification-rdino')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_verification_rdino_pipeline.py", "imports": ["io", "typing", "soundfile", "torch"], "module": "modelscope.pipelines.audio.speaker_verification_rdino_pipeline"}, "('PIPELINES', 'speaker-verification', 'speaker-verification-eres2net')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_verification_eres2net_pipeline.py", "imports": ["io", "typing", "soundfile", "torch"], "module": "modelscope.pipelines.audio.speaker_verification_eres2net_pipeline"}, "('PIPELINES', 'voice-activity-detection', 'vad-inference')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/voice_activity_detection_pipeline.py", "imports": ["os", "typing", "yaml", "json", "funasr"], "module": "modelscope.pipelines.audio.voice_activity_detection_pipeline"}, "('PIPELINES', 'speaker-diarization', 'speaker-diarization-inference')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_diarization_pipeline.py", "imports": ["numpy", "os", "shutil", "typing", "yaml", "json"], "module": "modelscope.pipelines.audio.speaker_diarization_pipeline"}, "('PIPELINES', 
'acoustic-echo-cancellation', 'speech-dfsmn-aec-psm-16k')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/linear_aec_pipeline.py", "imports": ["importlib", "numpy", "os", "typing", "yaml", "torch", "scipy"], "module": "modelscope.pipelines.audio.linear_aec_pipeline"}, "('PIPELINES', 'image-portrait-enhancement', 'gpen-image-portrait-enhancement')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_portrait_enhancement_pipeline.py", "imports": ["PIL", "numpy", "cv2", "math", "typing", "torch", "scipy"], "module": "modelscope.pipelines.cv.image_portrait_enhancement_pipeline"}, "('PIPELINES', 'video-text-retrieval', 'vop-video-text-retrieval')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vop_retrieval_pipeline.py", "imports": ["tqdm", "numpy", "math", "os", "random", "typing", "pickle", "torch", "collections", "gzip"], "module": "modelscope.pipelines.cv.vop_retrieval_pipeline"}, "('PIPELINES', 'face-attribute-recognition', 'resnet34-face-attribute-recognition-fairface')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_attribute_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_attribute_recognition_pipeline"}, "('PIPELINES', 'vision-efficient-tuning', 'vision-efficient-tuning')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vision_efficient_tuning_pipeline.py", "imports": ["numpy", "torch", "typing", "torchvision"], "module": "modelscope.pipelines.cv.vision_efficient_tuning_pipeline"}, "('PIPELINES', 'face-recognition', 'resnet-face-recognition-facemask')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/mask_face_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "collections"], "module": "modelscope.pipelines.cv.mask_face_recognition_pipeline"}, "('PIPELINES', 'image-depth-estimation', 'image-bts-depth-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_bts_depth_estimation_pipeline.py", "imports": ["numpy", "cv2", "typing", "albumentations", "torch"], "module": "modelscope.pipelines.cv.image_bts_depth_estimation_pipeline"}, "('PIPELINES', 'video-panoptic-segmentation', 'video-panoptic-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_panoptic_segmentation_pipeline.py", "imports": ["tqdm", "numpy", "cv2", "os", "typing", "torch", "mmcv"], "module": "modelscope.pipelines.cv.video_panoptic_segmentation_pipeline"}, "('PIPELINES', 'video-colorization', 'video-colorization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_colorization_pipeline.py", "imports": ["PIL", "numpy", "tempfile", "cv2", "os", "subprocess", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.video_colorization_pipeline"}, "('PIPELINES', 'face-recognition', 'ir50-face-recognition-arcface')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/arc_face_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.arc_face_recognition_pipeline"}, "('PIPELINES', 'action-detection', 'ResNetC3D-action-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/action_detection_pipeline.py", "imports": ["typing", "math", "os"], "module": 
"modelscope.pipelines.cv.action_detection_pipeline"}, "('PIPELINES', 'image-paintbyexample', 'stablediffusion-paintbyexample')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_paintbyexample_pipeline.py", "imports": ["PIL", "numpy", "einops", "cv2", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_paintbyexample_pipeline"}, "('PIPELINES', 'table-recognition', 'dla34-table-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/table_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.table_recognition_pipeline"}, "('PIPELINES', 'image-to-image-translation', 'image-to-image-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_to_image_translation_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "sys", "typing", "torch", "io", "torchvision"], "module": "modelscope.pipelines.cv.image_to_image_translation_pipeline"}, "('PIPELINES', 'image-classification', 'resnet50-image-classification-cc')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/content_check_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.content_check_pipeline"}, "('PIPELINES', 'image-classification', 'image-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.image_classification_pipeline"}, "('PIPELINES', 'image-classification', 'vit-base_image-classification_ImageNet-labels')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.image_classification_pipeline"}, "('PIPELINES', 'image-classification', 'vit-base_image-classification_Dailylife-labels')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.image_classification_pipeline"}, "('PIPELINES', 'image-classification', 'nextvit-small_image-classification_Dailylife-labels')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.image_classification_pipeline"}, "('PIPELINES', 'image-classification', 'convnext-base_image-classification_garbage')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.image_classification_pipeline"}, "('PIPELINES', 'image-classification', 'common-image-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.image_classification_pipeline"}, "('PIPELINES', 'image-classification', 'easyrobust-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.image_classification_pipeline"}, "('PIPELINES', 'image-classification', 
'bnext-small_image-classification_ImageNet-labels')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_classification_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.image_classification_pipeline"}, "('PIPELINES', 'video-human-matting', 'video-human-matting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_human_matting_pipeline.py", "imports": ["moviepy", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.video_human_matting_pipeline"}, "('PIPELINES', 'action-recognition', 'TAdaConv_action-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/action_recognition_pipeline.py", "imports": ["typing", "torch", "math", "os"], "module": "modelscope.pipelines.cv.action_recognition_pipeline"}, "('PIPELINES', 'action-recognition', 'patchshift-action-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/action_recognition_pipeline.py", "imports": ["typing", "torch", "math", "os"], "module": "modelscope.pipelines.cv.action_recognition_pipeline"}, "('PIPELINES', 'image-face-fusion', 'image-face-fusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_face_fusion_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.cv.image_face_fusion_pipeline"}, "('PIPELINES', 'video-text-retrieval', 'vop-video-text-retrieval-se')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vop_retrieval_se_pipeline.py", "imports": ["numpy", "os", "typing", "torch", "gzip"], "module": "modelscope.pipelines.cv.vop_retrieval_se_pipeline"}, "('PIPELINES', 'image-multi-view-depth-estimation', 'image-multi-view-depth-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_mvs_depth_estimation_pipeline.py", "imports": ["shutil", "typing", "tempfile", "os"], "module": "modelscope.pipelines.cv.image_mvs_depth_estimation_pipeline"}, "('PIPELINES', 'video-object-detection', 'cspnet_realtime-video-object-detection_streamyolo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/realtime_video_object_detection_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "json", "torchvision"], "module": "modelscope.pipelines.cv.realtime_video_object_detection_pipeline"}, "('PIPELINES', 'pointcloud-sceneflow-estimation', 'pointcloud-sceneflow-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/pointcloud_sceneflow_estimation_pipeline.py", "imports": ["numpy", "torch", "typing", "plyfile"], "module": "modelscope.pipelines.cv.pointcloud_sceneflow_estimation_pipeline"}, "('PIPELINES', 'video-embedding', 'hicossl-s3dg-video_embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/hicossl_video_embedding_pipeline.py", "imports": ["typing", "torch", "math", "os"], "module": "modelscope.pipelines.cv.hicossl_video_embedding_pipeline"}, "('PIPELINES', 'image-deblurring', 'nafnet-image-deblur')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_deblur_pipeline.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_deblur_pipeline"}, "('PIPELINES', 'body-2d-keypoints', 'hrnetv2w32_body-2d-keypoints_image')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/body_2d_keypoints_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "json", "torchvision"], "module": "modelscope.pipelines.cv.body_2d_keypoints_pipeline"}, "('PIPELINES', 'image-segmentation', 'maskdino-swin-image-instance-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/maskdino_instance_segmentation_pipeline.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.maskdino_instance_segmentation_pipeline"}, "('PIPELINES', 'face-image-generation', 'gan-face-image-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_image_generation_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_image_generation_pipeline"}, "('PIPELINES', 'image-fewshot-detection', 'image-fewshot-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_defrcn_fewshot_pipeline.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.pipelines.cv.image_defrcn_fewshot_pipeline"}, "('PIPELINES', 'video-stabilization', 'video-stabilization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_stabilization_pipeline.py", "imports": ["numpy", "tempfile", "math", "os", "cv2", "subprocess", "typing", "glob", "torch"], "module": "modelscope.pipelines.cv.video_stabilization_pipeline"}, "('PIPELINES', 'face-recognition', 'ir101-face-recognition-cfglint')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_recognition_pipeline"}, "('PIPELINES', 'controllable-image-generation', 'controllable-image-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/controllable_image_generation_pipeline.py", "imports": ["numpy", "tempfile", "math", "os", "cv2", "subprocess", "typing", "glob", "torch"], "module": "modelscope.pipelines.cv.controllable_image_generation_pipeline"}, "('PIPELINES', 'video-depth-estimation', 'video-depth-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_depth_estimation_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.video_depth_estimation_pipeline"}, "('PIPELINES', 'text-to-360panorama-image', 'text-to-360panorama-image')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/text_to_360panorama_image_pipeline.py", "imports": ["PIL", "numpy", "basicsr", "random", "typing", "torch", "realesrgan", "diffusers"], "module": "modelscope.pipelines.cv.text_to_360panorama_image_pipeline"}, "('PIPELINES', 'image-colorization', 'ddcolor-image-colorization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ddcolor_image_colorization_pipeline.py", "imports": ["numpy", "cv2", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.ddcolor_image_colorization_pipeline"}, "('PIPELINES', 'license-plate-detection', 'resnet18-license-plate-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/license_plate_detection_pipeline.py", "imports": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.license_plate_detection_pipeline"}, "('PIPELINES', 'image-segmentation', 
'm2fp-image-human-parsing')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_human_parsing_pipeline.py", "imports": ["numpy", "torch", "typing", "torchvision"], "module": "modelscope.pipelines.cv.image_human_parsing_pipeline"}, "('PIPELINES', 'crowd-counting', 'hrnet-crowd-counting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/crowd_counting_pipeline.py", "imports": ["PIL", "numpy", "math", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.crowd_counting_pipeline"}, "('PIPELINES', 'image-reid-person', 'passvitb-image-reid-person')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_reid_person_pipeline.py", "imports": ["PIL", "math", "os", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_reid_person_pipeline"}, "('PIPELINES', 'image-driving-perception', 'yolopv2_image-driving-percetion_bdd100k')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_driving_perception_pipeline.py", "imports": ["numpy", "typing", "cv2", "os"], "module": "modelscope.pipelines.cv.image_driving_perception_pipeline"}, "('PIPELINES', 'nerf-recon-vq-compression', 'nerf-recon-vq-compression')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/nerf_recon_vq_compression_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.nerf_recon_vq_compression_pipeline"}, "('PIPELINES', 'image-super-resolution', 'mobile-image-super-resolution')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/mobile_image_super_resolution_pipeline.py", "imports": ["numpy", "skimage", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.mobile_image_super_resolution_pipeline"}, "('PIPELINES', 'image-skychange', 'image-skychange')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_skychange_pipeline.py", "imports": ["PIL", "numpy", "pdb", "cv2", "time", "typing"], "module": "modelscope.pipelines.cv.image_skychange_pipeline"}, "('PIPELINES', 'video-embedding', 'cmdssl-r2p1d_video_embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/cmdssl_video_embedding_pipeline.py", "imports": ["PIL", "numpy", "os", "typing", "torch", "decord", "torchvision"], "module": "modelscope.pipelines.cv.cmdssl_video_embedding_pipeline"}, "('PIPELINES', 'video-frame-interpolation', 'video-frame-interpolation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_frame_interpolation_pipeline.py", "imports": ["numpy", "tempfile", "math", "os", "cv2", "subprocess", "typing", "glob", "torch", "torchvision"], "module": "modelscope.pipelines.cv.video_frame_interpolation_pipeline"}, "('PIPELINES', 'human-detection', 'resnet18-human-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_detection_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.cv.image_detection_pipeline"}, "('PIPELINES', 'image-object-detection', 'vit-object-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_detection_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.cv.image_detection_pipeline"}, "('PIPELINES', 'image-object-detection', 'abnormal-object-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_detection_pipeline.py", "imports": 
["numpy", "typing"], "module": "modelscope.pipelines.cv.image_detection_pipeline"}, "('PIPELINES', 'face-emotion', 'face-emotion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_emotion_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.cv.face_emotion_pipeline"}, "('PIPELINES', 'image-object-detection', 'tbs-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/tbs_detection_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "colorsys", "typing", "torch"], "module": "modelscope.pipelines.cv.tbs_detection_pipeline"}, "('PIPELINES', 'image-matching', 'image-matching')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_matching_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch"], "module": "modelscope.pipelines.cv.image_matching_pipeline"}, "('PIPELINES', 'product-segmentation', 'product-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/product_segmentation_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.cv.product_segmentation_pipeline"}, "('PIPELINES', 'shop-segmentation', 'shop-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/shop_segmentation_pipleline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.shop_segmentation_pipleline"}, "('PIPELINES', 'human-reconstruction', 'human-reconstruction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/human_reconstruction_pipeline.py", "imports": ["numpy", "os", "shutil", "typing", "torch", "trimesh"], "module": "modelscope.pipelines.cv.human_reconstruction_pipeline"}, "('PIPELINES', 'video-multi-object-tracking', 'video-multi-object-tracking')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_multi_object_tracking_pipeline.py", "imports": ["typing", "torch", "os"], "module": "modelscope.pipelines.cv.video_multi_object_tracking_pipeline"}, "('PIPELINES', 'image-classification', 'tinynas-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/tinynas_classification_pipeline.py", "imports": ["math", "os", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.tinynas_classification_pipeline"}, "('PIPELINES', 'video-inpainting', 'video-inpainting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_inpainting_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.video_inpainting_pipeline"}, "('PIPELINES', 'image-portrait-stylization', 'unet-person-image-cartoon')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_cartoon_pipeline.py", "imports": ["numpy", "cv2", "os", "tensorflow", "typing"], "module": "modelscope.pipelines.cv.image_cartoon_pipeline"}, "('PIPELINES', 'image-segmentation', 'fast-instance-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/fast_instance_segmentation_pipeline.py", "imports": ["numpy", "torch", "typing", "torchvision"], "module": "modelscope.pipelines.cv.fast_instance_segmentation_pipeline"}, "('PIPELINES', 'movie-scene-segmentation', 'resnet50-bert-movie-scene-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/movie_scene_segmentation_pipeline.py", "imports": ["typing", "torch"], "module": 
"modelscope.pipelines.cv.movie_scene_segmentation_pipeline"}, "('PIPELINES', 'language-guided-video-summarization', 'clip-it-video-summarization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/language_guided_video_summarization_pipeline.py", "imports": ["PIL", "numpy", "tempfile", "cv2", "os", "shutil", "clip", "random", "typing", "torch"], "module": "modelscope.pipelines.cv.language_guided_video_summarization_pipeline"}, "('PIPELINES', 'image-denoising', 'nafnet-image-denoise')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_denoise_pipeline.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_denoise_pipeline"}, "('PIPELINES', 'face-quality-assessment', 'manual-face-quality-assessment-fqa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_quality_assessment_pipeline.py", "imports": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_quality_assessment_pipeline"}, "('PIPELINES', 'text-driven-segmentation', 'text-driven-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/text_driven_segmentation_pipleline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.text_driven_segmentation_pipleline"}, "('PIPELINES', 'image-object-detection', 'vidt')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vidt_pipeline.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.vidt_pipeline"}, "('PIPELINES', 'image-to-image-generation', 'image-to-image-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_to_image_generate_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_to_image_generate_pipeline"}, "('PIPELINES', 'nerf-recon-4k', 'nerf-recon-4k')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/nerf_recon_4k_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.nerf_recon_4k_pipeline"}, "('PIPELINES', 'image-inpainting', 'image-inpainting-sdv2')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_inpainting_sdv2_pipeline.py", "imports": ["numpy", "tempfile", "cv2", "math", "os", "sys", "typing", "torch", "diffusers"], "module": "modelscope.pipelines.cv.image_inpainting_sdv2_pipeline"}, "('PIPELINES', 'image-super-resolution', 'rrdb-image-super-resolution')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_super_resolution_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch"], "module": "modelscope.pipelines.cv.image_super_resolution_pipeline"}, "('PIPELINES', 'face-detection', 'resnet101-face-detection-cvpr22papermogface')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/mog_face_detection_pipeline.py", "imports": ["numpy", "typing", "os"], "module": "modelscope.pipelines.cv.mog_face_detection_pipeline"}, "('PIPELINES', 'image-quality-assessment-mos', 'image-quality-assessment-mos')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_quality_assessment_mos_pipeline.py", "imports": ["numpy", "tempfile", "cv2", "math", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_quality_assessment_mos_pipeline"}, "('PIPELINES', 'image-body-reshaping', 
'flow-based-body-reshaping')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_body_reshaping_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.image_body_reshaping_pipeline"}, "('PIPELINES', 'face-detection', 'resnet50-face-detection-retinaface')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/retina_face_detection_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.retina_face_detection_pipeline"}, "('PIPELINES', 'image-quality-assessment-degradation', 'image-quality-assessment-degradation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_quality_assessment_degradation_pipeline.py", "imports": ["numpy", "tempfile", "cv2", "math", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_quality_assessment_degradation_pipeline"}, "('PIPELINES', 'nerf-recon-acc', 'nerf-recon-acc')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/nerf_recon_acc_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.nerf_recon_acc_pipeline"}, "('PIPELINES', 'panorama-depth-estimation', 'panorama-depth-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/panorama_depth_estimation_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch"], "module": "modelscope.pipelines.cv.panorama_depth_estimation_pipeline"}, "('PIPELINES', 'image-segmentation', 'vision-middleware-multi-task')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vision_middleware_pipeline.py", "imports": ["numpy", "math", "os", "typing", "torch", "mmcv", "torchvision"], "module": "modelscope.pipelines.cv.vision_middleware_pipeline"}, "('PIPELINES', 'face-liveness', 'manual-face-liveness-flir')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_liveness_ir_pipeline.py", "imports": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_liveness_ir_pipeline"}, "('PIPELINES', 'lineless-table-recognition', 'lore-lineless-table-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/lineless_table_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.lineless_table_recognition_pipeline"}, "('PIPELINES', 'facial-expression-recognition', 'vgg19-facial-expression-recognition-fer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/facial_expression_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.facial_expression_recognition_pipeline"}, "('PIPELINES', 'skin-retouching', 'unet-skin-retouching')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/skin_retouching_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "tensorflow", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.skin_retouching_pipeline"}, "('PIPELINES', 'semantic-segmentation', 'ddpm-image-semantic-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ddpm_semantic_segmentation_pipeline.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.ddpm_semantic_segmentation_pipeline"}, "('PIPELINES', 'image-colorization', 'unet-image-colorization')": 
{"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_colorization_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_colorization_pipeline"}, "('PIPELINES', 'open-vocabulary-detection', 'open-vocabulary-detection-vild')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_open_vocabulary_detection_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.image_open_vocabulary_detection_pipeline"}, "('PIPELINES', 'image-depth-estimation', 'image-depth-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_depth_estimation_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch"], "module": "modelscope.pipelines.cv.image_depth_estimation_pipeline"}, "('PIPELINES', 'indoor-layout-estimation', 'indoor-layout-estimation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/indoor_layout_estimation_pipeline.py", "imports": ["numpy", "cv2", "typing"], "module": "modelscope.pipelines.cv.indoor_layout_estimation_pipeline"}, "('PIPELINES', 'image-try-on', 'image-try-on')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_try_on_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.image_try_on_pipeline"}, "('PIPELINES', 'video-super-resolution', 'realbasicvsr-video-super-resolution')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_super_resolution_pipeline.py", "imports": ["numpy", "tempfile", "cv2", "math", "os", "subprocess", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.video_super_resolution_pipeline"}, "('PIPELINES', 'video-instance-segmentation', 'video-instance-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_instance_segmentation_pipeline.py", "imports": ["tqdm", "numpy", "cv2", "os", "typing", "torch", "mmcv"], "module": "modelscope.pipelines.cv.video_instance_segmentation_pipeline"}, "('PIPELINES', 'card-detection', 'resnet-card-detection-scrfd34gkps')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/card_detection_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.card_detection_pipeline"}, "('PIPELINES', 'portrait-matting', 'unet-image-matting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_matting_pipeline.py", "imports": ["numpy", "cv2", "os", "tensorflow", "typing"], "module": "modelscope.pipelines.cv.image_matting_pipeline"}, "('PIPELINES', 'universal-matting', 'unet-universal-matting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_matting_pipeline.py", "imports": ["numpy", "cv2", "os", "tensorflow", "typing"], "module": "modelscope.pipelines.cv.image_matting_pipeline"}, "('PIPELINES', 'referring-video-object-segmentation', 'referring-video-object-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/referring_video_object_segmentation_pipeline.py", "imports": ["moviepy", "numpy", "PIL", "tqdm", "tempfile", "einops", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.referring_video_object_segmentation_pipeline"}, "('PIPELINES', 'face-recognition', 'manual-face-recognition-frir')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_recognition_onnx_ir_pipeline.py", "imports": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_recognition_onnx_ir_pipeline"}, "('PIPELINES', 'general-recognition', 'resnet101-general-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/general_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.general_recognition_pipeline"}, "('PIPELINES', 'motion-generation', 'mdm-motion-generation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/motion_generation_pipeline.py", "imports": ["numpy", "tempfile", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.motion_generation_pipeline"}, "('PIPELINES', 'image-color-enhancement', 'adaint-image-color-enhance')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_color_enhance_pipeline.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_color_enhance_pipeline"}, "('PIPELINES', 'image-color-enhancement', 'deeplpf-image-color-enhance')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_color_enhance_pipeline.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_color_enhance_pipeline"}, "('PIPELINES', 'image-color-enhancement', 'csrnet-image-color-enhance')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_color_enhance_pipeline.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_color_enhance_pipeline"}, "('PIPELINES', 'object-detection-3d', 'object-detection-3d-depe')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/object_detection_3d_pipeline.py", "imports": ["PIL", "numpy", "tempfile", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.object_detection_3d_pipeline"}, "('PIPELINES', 'video-single-object-tracking', 'procontext-vitb-video-single-object-tracking')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_single_object_tracking_pipeline.py", "imports": ["typing", "cv2", "os"], "module": "modelscope.pipelines.cv.video_single_object_tracking_pipeline"}, "('PIPELINES', 'video-single-object-tracking', 'ostrack-vitb-video-single-object-tracking')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_single_object_tracking_pipeline.py", "imports": ["typing", "cv2", "os"], "module": "modelscope.pipelines.cv.video_single_object_tracking_pipeline"}, "('PIPELINES', 'face-detection', 'manual-face-detection-ulfd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ulfd_face_detection_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.ulfd_face_detection_pipeline"}, "('PIPELINES', 'pedestrian-attribute-recognition', 'resnet50_pedestrian-attribute-recognition_image')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/pedestrian_attribute_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "json", "torchvision"], "module": "modelscope.pipelines.cv.pedestrian_attribute_recognition_pipeline"}, "('PIPELINES', 'face-reconstruction', 'resnet50-face-reconstruction')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_reconstruction_pipeline.py", "imports": ["face_alignment", "PIL", "numpy", "cv2", "os", "shutil", "tensorflow", "typing", "torch", "io", "scipy"], "module": "modelscope.pipelines.cv.face_reconstruction_pipeline"}, "('PIPELINES', 'image-style-transfer', 'AAMS-style-transfer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_style_transfer_pipeline.py", "imports": ["numpy", "typing", "cv2", "os"], "module": "modelscope.pipelines.cv.image_style_transfer_pipeline"}, "('PIPELINES', 'ocr-recognition', 'convnextTiny-ocr-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_recognition_pipeline.py", "imports": [], "module": "modelscope.pipelines.cv.ocr_recognition_pipeline"}, "('PIPELINES', 'image-segmentation', 'image-semantic-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_semantic_segmentation_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch"], "module": "modelscope.pipelines.cv.image_semantic_segmentation_pipeline"}, "('PIPELINES', 'image-classification', 'image-structured-model-probing')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_structured_model_probing_pipeline.py", "imports": ["numpy", "math", "os", "typing", "torch", "mmcv", "torchvision"], "module": "modelscope.pipelines.cv.image_structured_model_probing_pipeline"}, "('PIPELINES', 'image-segmentation', 'cascade-mask-rcnn-swin-image-instance-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_instance_segmentation_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.image_instance_segmentation_pipeline"}, "('PIPELINES', 'image-segmentation', 'image-panoptic-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_panoptic_segmentation_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch"], "module": "modelscope.pipelines.cv.image_panoptic_segmentation_pipeline"}, "('PIPELINES', 'face-human-hand-detection', 'face-human-hand-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_human_hand_detection_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.cv.face_human_hand_detection_pipeline"}, "('PIPELINES', 'video-summarization', 'googlenet_pgl_video_summarization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_summarization_pipeline.py", "imports": ["tqdm", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.video_summarization_pipeline"}, "('PIPELINES', 'image-quality-assessment-mos', 'image-quality-assessment-man')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_quality_assessment_man_pipeline.py", "imports": ["numpy", "tempfile", "cv2", "math", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_quality_assessment_man_pipeline"}, "('PIPELINES', 'body-3d-keypoints', 'canonical_body-3d-keypoints_video')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/body_3d_keypoints_pipeline.py", "imports": ["numpy", "tempfile", "cv2", "os", "mpl_toolkits", "typing", "matplotlib", "torch", "datetime"], "module": "modelscope.pipelines.cv.body_3d_keypoints_pipeline"}, "('PIPELINES', 'face-recognition', 
'ir-face-recognition-rts')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_recognition_ood_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_recognition_ood_pipeline"}, "('PIPELINES', 'video-deinterlace', 'video-deinterlace')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_deinterlace_pipeline.py", "imports": ["numpy", "tempfile", "cv2", "math", "os", "subprocess", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.video_deinterlace_pipeline"}, "('PIPELINES', 'virtual-try-on', 'virtual-try-on')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/virtual_try_on_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.virtual_try_on_pipeline"}, "('PIPELINES', 'ocr-detection', 'resnet18-ocr-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_detection_pipeline.py", "imports": ["numpy", "math", "cv2", "os", "tensorflow", "typing", "tf_slim", "torch"], "module": "modelscope.pipelines.cv.ocr_detection_pipeline"}, "('PIPELINES', 'face-recognition', 'manual-face-recognition-frfm')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_recognition_onnx_fm_pipeline.py", "imports": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_recognition_onnx_fm_pipeline"}, "('PIPELINES', 'animal-recognition', 'resnet101-animal-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/animal_recognition_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.animal_recognition_pipeline"}, "('PIPELINES', 'image-inpainting', 'fft-inpainting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_inpainting_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch"], "module": "modelscope.pipelines.cv.image_inpainting_pipeline"}, "('PIPELINES', 'semantic-segmentation', 'u2net-salient-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_salient_detection_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.image_salient_detection_pipeline"}, "('PIPELINES', 'semantic-segmentation', 'res2net-salient-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_salient_detection_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.image_salient_detection_pipeline"}, "('PIPELINES', 'semantic-segmentation', 'res2net-camouflaged-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_salient_detection_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.image_salient_detection_pipeline"}, "('PIPELINES', 'bad-image-detecting', 'bad-image-detecting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/bad_image_detecting_pipeline.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.pipelines.cv.bad_image_detecting_pipeline"}, "('PIPELINES', 'product-retrieval-embedding', 'resnet50-product-retrieval-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/product_retrieval_embedding_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", 
"torchvision"], "module": "modelscope.pipelines.cv.product_retrieval_embedding_pipeline"}, "('PIPELINES', 'video-category', 'video-category')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_category_pipeline.py", "imports": ["PIL", "numpy", "os", "typing", "torch", "json", "decord", "torchvision"], "module": "modelscope.pipelines.cv.video_category_pipeline"}, "('PIPELINES', 'face-detection', 'resnet-face-detection-scrfd10gkps')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_detection_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_detection_pipeline"}, "('PIPELINES', 'face-liveness', 'manual-face-liveness-flxc')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_liveness_xc_pipeline.py", "imports": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.face_liveness_xc_pipeline"}, "('PIPELINES', 'panorama-depth-estimation', 'panorama-depth-estimation-s2net')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/panorama_depth_estimation_s2net_pipeline.py", "imports": ["PIL", "numpy", "cv2", "typing", "torch"], "module": "modelscope.pipelines.cv.panorama_depth_estimation_s2net_pipeline"}, "('PIPELINES', 'face-detection', 'manual-face-detection-mtcnn')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/mtcnn_face_detection_pipeline.py", "imports": ["typing", "torch", "os"], "module": "modelscope.pipelines.cv.mtcnn_face_detection_pipeline"}, "('PIPELINES', 'video-object-segmentation', 'video-object-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_object_segmentation_pipeline.py", "imports": ["PIL", "numpy", "os", "typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.video_object_segmentation_pipeline"}, "('PIPELINES', 'face-2d-keypoints', 'manual-facial-landmark-confidence-flcm')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/facial_landmark_confidence_pipeline.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "module": "modelscope.pipelines.cv.facial_landmark_confidence_pipeline"}, "('PIPELINES', 'image-demoireing', 'uhdm-image-demoireing')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_restoration_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.image_restoration_pipeline"}, "('PIPELINES', 'image-debanding', 'rrdb-image-debanding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_debanding_pipeline.py", "imports": ["typing", "torch", "torchvision"], "module": "modelscope.pipelines.cv.image_debanding_pipeline"}, "('PIPELINES', 'live-category', 'live-category')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/live_category_pipeline.py", "imports": ["PIL", "numpy", "os", "typing", "torch", "decord", "torchvision"], "module": "modelscope.pipelines.cv.live_category_pipeline"}, "('PIPELINES', 'hand-static', 'hand-static')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/hand_static_pipeline.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.cv.hand_static_pipeline"}, "('PIPELINES', 'domain-specific-object-detection', 'tinynas-detection')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/tinynas_detection_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.tinynas_detection_pipeline"}, "('PIPELINES', 'image-object-detection', 'tinynas-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/tinynas_detection_pipeline.py", "imports": ["typing"], "module": "modelscope.pipelines.cv.tinynas_detection_pipeline"}, "('PIPELINES', 'protein-structure', 'unifold-protein-structure')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/science/protein_structure_pipeline.py", "imports": ["numpy", "time", "os", "typing", "unicore", "torch", "json"], "module": "modelscope.pipelines.science.protein_structure_pipeline"}, "('PIPELINES', 'task-template', 'pipeline-template')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/pipeline_template.py", "imports": ["numpy", "typing"], "module": "modelscope.pipelines.pipeline_template"}, "('PREPROCESSORS', 'nlp', 're-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/relation_extraction_preprocessor.py", "imports": ["typing", "transformers"], "module": "modelscope.preprocessors.nlp.relation_extraction_preprocessor"}, "('PREPROCESSORS', 'nlp', 'dialog-state-tracking-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/dialog_state_tracking_preprocessor.py", "imports": ["typing"], "module": "modelscope.preprocessors.nlp.space.dialog_state_tracking_preprocessor"}, "('PREPROCESSORS', 'nlp', 'dialog-intent-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/dialog_intent_prediction_preprocessor.py", "imports": ["typing", "json", "os"], "module": "modelscope.preprocessors.nlp.space.dialog_intent_prediction_preprocessor"}, "('PREPROCESSORS', 'nlp', 'dialog-modeling-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/dialog_modeling_preprocessor.py", "imports": ["typing", "os"], "module": "modelscope.preprocessors.nlp.space.dialog_modeling_preprocessor"}, "('PREPROCESSORS', 'nlp', 'conversational-text-to-sql')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_en/conversational_text_to_sql_preprocessor.py", "imports": ["os", "text2sql_lgesql", "typing", "torch", "json"], "module": "modelscope.preprocessors.nlp.space_T_en.conversational_text_to_sql_preprocessor"}, "('PREPROCESSORS', 'nlp', 'nli-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_classification_preprocessor.py", "imports": ["numpy", "typing"], "module": "modelscope.preprocessors.nlp.text_classification_preprocessor"}, "('PREPROCESSORS', 'nlp', 'sen-sim-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_classification_preprocessor.py", "imports": ["numpy", "typing"], "module": "modelscope.preprocessors.nlp.text_classification_preprocessor"}, "('PREPROCESSORS', 'nlp', 'bert-seq-cls-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_classification_preprocessor.py", "imports": ["numpy", "typing"], "module": "modelscope.preprocessors.nlp.text_classification_preprocessor"}, "('PREPROCESSORS', 'nlp', 'sen-cls-tokenizer')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_classification_preprocessor.py", "imports": ["numpy", "typing"], "module": "modelscope.preprocessors.nlp.text_classification_preprocessor"}, "('PREPROCESSORS', 'nlp', 'document-grounded-dialog-generate')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/document_grounded_dialog_generate_preprocessor.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.preprocessors.nlp.document_grounded_dialog_generate_preprocessor"}, "('PREPROCESSORS', 'nlp', 'viet-ner-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_viet_preprocessor.py", "imports": ["typing", "torch"], "module": "modelscope.preprocessors.nlp.token_classification_viet_preprocessor"}, "('PREPROCESSORS', 'nlp', 'word-alignment')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/word_alignment_preprocessor.py", "imports": ["numpy", "itertools", "os", "typing", "torch"], "module": "modelscope.preprocessors.nlp.word_alignment_preprocessor"}, "('PREPROCESSORS', 'nlp', 'siamese-uie-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/siamese_uie_preprocessor.py", "imports": ["typing", "transformers"], "module": "modelscope.preprocessors.nlp.siamese_uie_preprocessor"}, "('PREPROCESSORS', 'nlp', 'mgeo-ranking')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/mgeo_ranking_preprocessor.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.preprocessors.nlp.mgeo_ranking_preprocessor"}, "('PREPROCESSORS', 'nlp', 'canmt-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/canmt_translation.py", "imports": ["os", "typing", "subword_nmt", "torch", "jieba", "sacremoses"], "module": "modelscope.preprocessors.nlp.canmt_translation"}, "('PREPROCESSORS', 'nlp', 'sentence-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/sentence_embedding_preprocessor.py", "imports": ["typing"], "module": "modelscope.preprocessors.nlp.sentence_embedding_preprocessor"}, "('PREPROCESSORS', 'nlp', 'zero-shot-cls-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/zero_shot_classification_preprocessor.py", "imports": ["typing"], "module": "modelscope.preprocessors.nlp.zero_shot_classification_preprocessor"}, "('PREPROCESSORS', 'nlp', 'translation-evaluation-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/translation_evaluation_preprocessor.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.preprocessors.nlp.translation_evaluation_preprocessor"}, "('PREPROCESSORS', 'nlp', 'dialog-use-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/dialog_classification_use_preprocessor.py", "imports": ["typing", "transformers", "torch"], "module": "modelscope.preprocessors.nlp.dialog_classification_use_preprocessor"}, "('PREPROCESSORS', 'nlp', 'thai-ner-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_thai_preprocessor.py", "imports": ["typing"], "module": "modelscope.preprocessors.nlp.token_classification_thai_preprocessor"}, "('PREPROCESSORS', 'nlp', 'thai-wseg-tokenizer')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_thai_preprocessor.py", "imports": ["typing"], "module": "modelscope.preprocessors.nlp.token_classification_thai_preprocessor"}, "('PREPROCESSORS', 'nlp', 'document-grounded-dialog-rerank')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/document_grounded_dialog_rerank_preprocessor.py", "imports": ["copy", "os", "typing", "transformers", "torch"], "module": "modelscope.preprocessors.nlp.document_grounded_dialog_rerank_preprocessor"}, "('PREPROCESSORS', 'nlp', 'table-question-answering-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_cn/table_question_answering_preprocessor.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.preprocessors.nlp.space_T_cn.table_question_answering_preprocessor"}, "('PREPROCESSORS', 'nlp', 'Tokenize')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/bert_seq_cls_tokenizer.py", "imports": ["typing", "transformers"], "module": "modelscope.preprocessors.nlp.bert_seq_cls_tokenizer"}, "('PREPROCESSORS', 'nlp', 'document-grounded-dialog-retrieval')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/document_grounded_dialog_retrieval_preprocessor.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.preprocessors.nlp.document_grounded_dialog_retrieval_preprocessor"}, "('PREPROCESSORS', 'nlp', 'fill-mask')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/fill_mask_preprocessor.py", "imports": ["numpy", "os", "abc", "re", "typing", "torch"], "module": "modelscope.preprocessors.nlp.fill_mask_preprocessor"}, "('PREPROCESSORS', 'nlp', 'fill-mask-ponet')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/fill_mask_preprocessor.py", "imports": ["numpy", "os", "abc", "re", "typing", "torch"], "module": "modelscope.preprocessors.nlp.fill_mask_preprocessor"}, "('PREPROCESSORS', 'nlp', 'machine-reading-comprehension-for-ner')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/machine_reading_comprehension_preprocessor.py", "imports": ["transformers", "torch", "os"], "module": "modelscope.preprocessors.nlp.machine_reading_comprehension_preprocessor"}, "('PREPROCESSORS', 'nlp', 'text-error-correction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_error_correction.py", "imports": ["typing", "transformers", "torch", "os"], "module": "modelscope.preprocessors.nlp.text_error_correction"}, "('PREPROCESSORS', 'nlp', 'word-segment-text-to-label-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_preprocessor.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.preprocessors.nlp.token_classification_preprocessor"}, "('PREPROCESSORS', 'nlp', 'ner-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_preprocessor.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.preprocessors.nlp.token_classification_preprocessor"}, "('PREPROCESSORS', 'nlp', 'token-cls-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_preprocessor.py", "imports": ["numpy", "torch", "typing"], "module": 
"modelscope.preprocessors.nlp.token_classification_preprocessor"}, "('PREPROCESSORS', 'nlp', 'sequence-labeling-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_preprocessor.py", "imports": ["numpy", "torch", "typing"], "module": "modelscope.preprocessors.nlp.token_classification_preprocessor"}, "('PREPROCESSORS', 'nlp', 'faq-question-answering-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/faq_question_answering_preprocessor.py", "imports": ["typing", "torch"], "module": "modelscope.preprocessors.nlp.faq_question_answering_preprocessor"}, "('PREPROCESSORS', 'nlp', 'feature-extraction')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/feature_extraction_preprocessor.py", "imports": ["numpy", "typing"], "module": "modelscope.preprocessors.nlp.feature_extraction_preprocessor"}, "('PREPROCESSORS', 'nlp', 'text-ranking')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_ranking_preprocessor.py", "imports": ["typing", "transformers"], "module": "modelscope.preprocessors.nlp.text_ranking_preprocessor"}, "('PREPROCESSORS', 'nlp', 'text-gen-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_generation_preprocessor.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.preprocessors.nlp.text_generation_preprocessor"}, "('PREPROCESSORS', 'nlp', 'text-gen-jieba-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_generation_preprocessor.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.preprocessors.nlp.text_generation_preprocessor"}, "('PREPROCESSORS', 'nlp', 'sentence-piece')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_generation_preprocessor.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.preprocessors.nlp.text_generation_preprocessor"}, "('PREPROCESSORS', 'nlp', 'text2text-gen-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_generation_preprocessor.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.preprocessors.nlp.text_generation_preprocessor"}, "('PREPROCESSORS', 'nlp', 'mglm-summarization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/mglm_summarization_preprocessor.py", "imports": ["typing", "re", "os"], "module": "modelscope.preprocessors.nlp.mglm_summarization_preprocessor"}, "('PREPROCESSORS', 'nlp', 'document-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/document_segmentation_preprocessor.py", "imports": ["typing"], "module": "modelscope.preprocessors.nlp.document_segmentation_preprocessor"}, "('PREPROCESSORS', 'audio', 'wav-to-scp')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/asr.py", "imports": ["typing", "os"], "module": "modelscope.preprocessors.asr"}, "('PREPROCESSORS', 'cv', 'load-image')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py", "imports": ["PIL", "numpy", "cv2", "typing", "io"], "module": "modelscope.preprocessors.image"}, "('PREPROCESSORS', 'cv', 'object-detection-tinynas-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py", "imports": ["PIL", "numpy", "cv2", 
"typing", "io"], "module": "modelscope.preprocessors.image"}, "('PREPROCESSORS', 'cv', 'image-color-enhance-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py", "imports": ["PIL", "numpy", "cv2", "typing", "io"], "module": "modelscope.preprocessors.image"}, "('PREPROCESSORS', 'cv', 'image-denoise-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py", "imports": ["PIL", "numpy", "cv2", "typing", "io"], "module": "modelscope.preprocessors.image"}, "('PREPROCESSORS', 'cv', 'image-deblur-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py", "imports": ["PIL", "numpy", "cv2", "typing", "io"], "module": "modelscope.preprocessors.image"}, "('PREPROCESSORS', 'cv', 'image-portrait-enhancement-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py", "imports": ["PIL", "numpy", "cv2", "typing", "io"], "module": "modelscope.preprocessors.image"}, "('PREPROCESSORS', 'cv', 'image-instance-segmentation-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py", "imports": ["PIL", "numpy", "cv2", "typing", "io"], "module": "modelscope.preprocessors.image"}, "('PREPROCESSORS', 'cv', 'video-summarization-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py", "imports": ["PIL", "numpy", "cv2", "typing", "io"], "module": "modelscope.preprocessors.image"}, "('PREPROCESSORS', 'cv', 'image-classification-bypass-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py", "imports": ["PIL", "numpy", "cv2", "typing", "io"], "module": "modelscope.preprocessors.image"}, "('PREPROCESSORS', 'audio', 'LinearAECAndFbank')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/audio.py", "imports": ["numpy", "os", "typing", "torch", "io", "scipy"], "module": "modelscope.preprocessors.audio"}, "('PREPROCESSORS', 'audio', 'wav-to-lists')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/kws.py", "imports": ["typing", "yaml", "os"], "module": "modelscope.preprocessors.kws"}, "('PREPROCESSORS', 'multi-modal', 'diffusion-image-generation-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/multi_modal.py", "imports": ["PIL", "numpy", "timm", "os", "re", "typing", "torch", "io", "json", "decord", "torchvision"], "module": "modelscope.preprocessors.multi_modal"}, "('PREPROCESSORS', 'multi-modal', 'ofa-tasks-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/multi_modal.py", "imports": ["PIL", "numpy", "timm", "os", "re", "typing", "torch", "io", "json", "decord", "torchvision"], "module": "modelscope.preprocessors.multi_modal"}, "('PREPROCESSORS', 'multi-modal', 'clip-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/multi_modal.py", "imports": ["PIL", "numpy", "timm", "os", "re", "typing", "torch", "io", "json", "decord", "torchvision"], "module": "modelscope.preprocessors.multi_modal"}, "('PREPROCESSORS', 'multi-modal', 'mplug-tasks-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/multi_modal.py", "imports": ["PIL", "numpy", "timm", "os", "re", "typing", "torch", "io", "json", "decord", "torchvision"], "module": 
"modelscope.preprocessors.multi_modal"}, "('PREPROCESSORS', 'multi-modal', 'vldoc-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/multi_modal.py", "imports": ["PIL", "numpy", "timm", "os", "re", "typing", "torch", "io", "json", "decord", "torchvision"], "module": "modelscope.preprocessors.multi_modal"}, "('PREPROCESSORS', 'multi-modal', 'hitea-tasks-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/multi_modal.py", "imports": ["PIL", "numpy", "timm", "os", "re", "typing", "torch", "io", "json", "decord", "torchvision"], "module": "modelscope.preprocessors.multi_modal"}, "('PREPROCESSORS', 'multi-modal', 'mplug-owl-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/multi_modal.py", "imports": ["PIL", "numpy", "timm", "os", "re", "typing", "torch", "io", "json", "decord", "torchvision"], "module": "modelscope.preprocessors.multi_modal"}, "('PREPROCESSORS', 'multi-modal', 'image-captioning-clip-interrogator-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/multi_modal.py", "imports": ["PIL", "numpy", "timm", "os", "re", "typing", "torch", "io", "json", "decord", "torchvision"], "module": "modelscope.preprocessors.multi_modal"}, "('PREPROCESSORS', 'cv', 'bad-image-detecting-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/bad_image_detecting_preprocessor.py", "imports": ["PIL", "numpy", "math", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.bad_image_detecting_preprocessor"}, "('PREPROCESSORS', 'cv', 'image-quality_assessment-man-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_quality_assessment_man.py", "imports": ["PIL", "numpy", "math", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.image_quality_assessment_man"}, "('PREPROCESSORS', 'cv', 'image-classification-mmcv-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/mmcls_preprocessor.py", "imports": ["numpy", "typing", "os"], "module": "modelscope.preprocessors.cv.mmcls_preprocessor"}, "('PREPROCESSORS', 'cv', 'image-demoire-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_restoration_preprocessor.py", "imports": ["PIL", "numpy", "math", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.image_restoration_preprocessor"}, "('PREPROCESSORS', 'cv', 'image-quality_assessment-mos-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_quality_assessment_mos.py", "imports": ["numpy", "math", "cv2", "typing", "torchvision"], "module": "modelscope.preprocessors.cv.image_quality_assessment_mos"}, "('PREPROCESSORS', 'cv', 'controllable-image-generation-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/controllable_image_generation.py", "imports": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.controllable_image_generation"}, "('PREPROCESSORS', 'cv', 'RandomCrop')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_classification_preprocessor.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": 
"modelscope.preprocessors.cv.image_classification_preprocessor"}, "('PREPROCESSORS', 'cv', 'RandomResizedCrop')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_classification_preprocessor.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.image_classification_preprocessor"}, "('PREPROCESSORS', 'cv', 'Resize')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_classification_preprocessor.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.image_classification_preprocessor"}, "('PREPROCESSORS', 'cv', 'CenterCrop')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_classification_preprocessor.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.image_classification_preprocessor"}, "('PREPROCESSORS', 'cv', 'RandomHorizontalFlip')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_classification_preprocessor.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.image_classification_preprocessor"}, "('PREPROCESSORS', 'cv', 'Normalize')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_classification_preprocessor.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.image_classification_preprocessor"}, "('PREPROCESSORS', 'cv', 'ImageToTensor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_classification_preprocessor.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.image_classification_preprocessor"}, "('PREPROCESSORS', 'cv', 'image-classification-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_classification_preprocessor.py", "imports": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "module": "modelscope.preprocessors.cv.image_classification_preprocessor"}, "('PREPROCESSORS', 'text-to-speech', 'kantts-data-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/tts.py", "imports": ["typing", "kantts", "os"], "module": "modelscope.preprocessors.tts"}, "('PREPROCESSORS', 'audio', 'sen-cls-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/speaker.py", "imports": ["typing", "torch"], "module": "modelscope.preprocessors.speaker"}, "('PREPROCESSORS', 'audio', 'token-cls-tokenizer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/speaker.py", "imports": ["typing", "torch"], "module": "modelscope.preprocessors.speaker"}, "('PREPROCESSORS', 'cv', 'movie-scene-segmentation-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/video.py", "imports": ["numpy", "tempfile", "math", "os", "random", "uuid", "torch", "urllib", "decord", "torchvision"], "module": "modelscope.preprocessors.video"}, "('PREPROCESSORS', 'default', 'Compose')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/common.py", "imports": ["numpy", "time", "typing", "torch", "collections"], "module": "modelscope.preprocessors.common"}, 
"('PREPROCESSORS', 'default', 'ToTensor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/common.py", "imports": ["numpy", "time", "typing", "torch", "collections"], "module": "modelscope.preprocessors.common"}, "('PREPROCESSORS', 'default', 'Filter')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/common.py", "imports": ["numpy", "time", "typing", "torch", "collections"], "module": "modelscope.preprocessors.common"}, "('PREPROCESSORS', 'default', 'ToNumpy')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/common.py", "imports": ["numpy", "time", "typing", "torch", "collections"], "module": "modelscope.preprocessors.common"}, "('PREPROCESSORS', 'default', 'Rename')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/common.py", "imports": ["numpy", "time", "typing", "torch", "collections"], "module": "modelscope.preprocessors.common"}, "('PREPROCESSORS', 'default', 'Identity')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/common.py", "imports": ["numpy", "time", "typing", "torch", "collections"], "module": "modelscope.preprocessors.common"}, "('PREPROCESSORS', 'science', 'unifold-preprocessor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/science/uni_fold.py", "imports": ["logging", "os", "tarfile", "requests", "re", "random", "unittest", "pathlib", "pickle", "json", "gzip", "tqdm", "numpy", "time", "ipdb", "hashlib", "typing", "torch"], "module": "modelscope.preprocessors.science.uni_fold"}, "('TRAINERS', 'default', 'faq-question-answering-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/faq_question_answering_trainer.py", "imports": ["distutils", "numpy", "functools", "contextlib", "dataclasses", "typing", "torch", "collections"], "module": "modelscope.trainers.nlp.faq_question_answering_trainer"}, "('TRAINERS', 'default', 'dialog-modeling-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/space/dialog_modeling_trainer.py", "imports": ["numpy", "os", "typing", "time"], "module": "modelscope.trainers.nlp.space.dialog_modeling_trainer"}, "('TRAINERS', 'default', 'dialog-intent-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/space/dialog_intent_trainer.py", "imports": ["numpy", "typing", "os"], "module": "modelscope.trainers.nlp.space.dialog_intent_trainer"}, "('TRAINERS', 'default', 'csanmt-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/csanmt_translation_trainer.py", "imports": ["typing", "os", "tensorflow", "time"], "module": "modelscope.trainers.nlp.csanmt_translation_trainer"}, "('TRAINERS', 'default', 'document-grounded-dialog-rerank-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/document_grounded_dialog_rerank_trainer.py", "imports": ["numpy", "time", "os", "random", "typing", "transformers", "torch"], "module": "modelscope.trainers.nlp.document_grounded_dialog_rerank_trainer"}, "('TRAINERS', 'default', 'nlp-text-ranking-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/text_ranking_trainer.py", "imports": ["tqdm", "numpy", "time", "dataclasses", "typing", "torch"], "module": "modelscope.trainers.nlp.text_ranking_trainer"}, "('TRAINERS', 'default', 'document-grounded-dialog-retrieval-trainer')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/document_grounded_dialog_retrieval_trainer.py", "imports": ["tqdm", "numpy", "os", "transformers", "torch", "json", "faiss"], "module": "modelscope.trainers.nlp.document_grounded_dialog_retrieval_trainer"}, "('TRAINERS', 'default', 'bert-sentiment-analysis')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/sequence_classification_trainer.py", "imports": ["numpy", "typing", "time"], "module": "modelscope.trainers.nlp.sequence_classification_trainer"}, "('TRAINERS', 'default', 'table-question-answering-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/table_question_answering_trainer.py", "imports": ["tqdm", "numpy", "time", "os", "typing", "torch", "json"], "module": "modelscope.trainers.nlp.table_question_answering_trainer"}, "('TRAINERS', 'default', 'nlp-plug-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/plug_trainer.py", "imports": ["os", "deepspeed", "typing", "torch", "megatron_util"], "module": "modelscope.trainers.nlp.plug_trainer"}, "('TRAINERS', 'default', 'text-generation-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/text_generation_trainer.py", "imports": ["typing", "torch"], "module": "modelscope.trainers.nlp.text_generation_trainer"}, "('TRAINERS', 'default', 'siamese-uie-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/siamese_uie_trainer.py", "imports": ["numpy", "math", "os", "time", "random", "typing", "torch", "collections", "json"], "module": "modelscope.trainers.nlp.siamese_uie_trainer"}, "('TRAINERS', 'default', 'nlp-gpt-moe-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/gpt_moe_trainer.py", "imports": ["os", "typing", "torch", "megatron_util", "collections"], "module": "modelscope.trainers.nlp.gpt_moe_trainer"}, "('TRAINERS', 'default', 'document-grounded-dialog-generate-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/document_grounded_dialog_generate_trainer.py", "imports": ["tqdm", "string", "os", "re", "rouge", "transformers", "torch", "sacrebleu", "collections", "json"], "module": "modelscope.trainers.nlp.document_grounded_dialog_generate_trainer"}, "('TRAINERS', 'default', 'translation-evaluation-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/translation_evaluation_trainer.py", "imports": ["tqdm", "math", "os", "random", "typing", "transformers", "torch", "pandas"], "module": "modelscope.trainers.nlp.translation_evaluation_trainer"}, "('TRAINERS', 'default', 'nlp-sentence-embedding-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/sentence_embedding_trainer.py", "imports": ["tqdm", "numpy", "time", "dataclasses", "typing", "transformers", "torch"], "module": "modelscope.trainers.nlp.sentence_embedding_trainer"}, "('TRAINERS', 'default', 'nlp-gpt3-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/gpt3_trainer.py", "imports": ["typing", "copy", "torch", "os"], "module": "modelscope.trainers.nlp.gpt3_trainer"}, "('TRAINERS', 'default', 'mplug')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/mplug/mplug_trainer.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.trainers.multi_modal.mplug.mplug_trainer"}, "('TRAINERS', 'default', 
'lora-diffusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py", "imports": ["typing", "torch", "diffusers"], "module": "modelscope.trainers.multi_modal.lora_diffusion.lora_diffusion_trainer"}, "('TRAINERS', 'default', 'custom-diffusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py", "imports": ["diffusers", "numpy", "PIL", "tqdm", "itertools", "os", "random", "hashlib", "typing", "pathlib", "torch", "warnings", "json", "torchvision"], "module": "modelscope.trainers.multi_modal.custom_diffusion.custom_diffusion_trainer"}, "('TRAINERS', 'default', 'stable-diffusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/stable_diffusion/stable_diffusion_trainer.py", "imports": ["typing", "torch"], "module": "modelscope.trainers.multi_modal.stable_diffusion.stable_diffusion_trainer"}, "('TRAINERS', 'default', 'mgeo-ranking-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/mgeo_ranking_trainer.py", "imports": ["dataclasses", "typing", "torch"], "module": "modelscope.trainers.multi_modal.mgeo_ranking_trainer"}, "('TRAINERS', 'default', 'dreambooth-diffusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py", "imports": ["diffusers", "PIL", "tqdm", "itertools", "shutil", "hashlib", "typing", "pathlib", "torch", "warnings", "collections", "torchvision"], "module": "modelscope.trainers.multi_modal.dreambooth_diffusion.dreambooth_diffusion_trainer"}, "('TRAINERS', 'default', 'ofa')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/ofa/ofa_trainer.py", "imports": ["tempfile", "math", "os", "shutil", "functools", "typing", "torch", "json"], "module": "modelscope.trainers.multi_modal.ofa.ofa_trainer"}, "('TRAINERS', 'default', 'image-classification-team')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/team/team_trainer.py", "imports": ["numpy", "os", "sklearn", "typing", "torch", "collections"], "module": "modelscope.trainers.multi_modal.team.team_trainer"}, "('TRAINERS', 'default', 'clip-multi-modal-embedding')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/clip/clip_trainer.py", "imports": ["typing", "torch", "math", "os"], "module": "modelscope.trainers.multi_modal.clip.clip_trainer"}, "('TRAINERS', 'default', 'efficient-diffusion-tuning')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/efficient_diffusion_tuning/efficient_diffusion_tuning_trainer.py", "imports": ["typing", "torch"], "module": "modelscope.trainers.multi_modal.efficient_diffusion_tuning.efficient_diffusion_tuning_trainer"}, "('TRAINERS', 'default', 'speech-kantts-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/tts_trainer.py", "imports": ["tempfile", "zipfile", "os", "shutil", "typing", "json"], "module": "modelscope.trainers.audio.tts_trainer"}, "('TRAINERS', 'default', 'speech-separation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/separation_trainer.py", "imports": ["tqdm", "numpy", "os", "torchaudio", "typing", "torch", "csv", "speechbrain"], "module": "modelscope.trainers.audio.separation_trainer"}, "('TRAINERS', 'default', 'speech-asr-trainer')": 
{"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/asr_trainer.py", "imports": ["tempfile", "os", "shutil", "typing", "json", "funasr"], "module": "modelscope.trainers.audio.asr_trainer"}, "('TRAINERS', 'default', 'speech_dfsmn_kws_char_farfield')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/kws_farfield_trainer.py", "imports": ["numpy", "math", "os", "typing", "glob", "pickle", "torch", "datetime"], "module": "modelscope.trainers.audio.kws_farfield_trainer"}, "('TRAINERS', 'default', 'speech_frcrn_ans_cirm_16k')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/ans_trainer.py", "imports": [], "module": "modelscope.trainers.audio.ans_trainer"}, "('TRAINERS', 'default', 'speech_kws_fsmn_char_ctc_nearfield')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/kws_nearfield_trainer.py", "imports": ["tensorboardX", "copy", "os", "re", "typing", "yaml", "torch", "datetime"], "module": "modelscope.trainers.audio.kws_nearfield_trainer"}, "('TRAINERS', 'default', 'card-detection-scrfd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/card_detection_scrfd_trainer.py", "imports": [], "module": "modelscope.trainers.cv.card_detection_scrfd_trainer"}, "('TRAINERS', 'default', 'cartoon-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/cartoon_translation_trainer.py", "imports": ["tqdm", "numpy", "os", "packaging", "tensorflow", "typing"], "module": "modelscope.trainers.cv.cartoon_translation_trainer"}, "('TRAINERS', 'default', 'image-portrait-enhancement')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_portrait_enhancement_trainer.py", "imports": ["collections", "torch"], "module": "modelscope.trainers.cv.image_portrait_enhancement_trainer"}, "('TRAINERS', 'default', 'referring-video-object-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/referring_video_object_segmentation_trainer.py", "imports": ["torch", "os"], "module": "modelscope.trainers.cv.referring_video_object_segmentation_trainer"}, "('TRAINERS', 'default', 'tinynas-damoyolo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_detection_damoyolo_trainer.py", "imports": ["math", "time", "os", "typing", "torch", "easydict", "datetime"], "module": "modelscope.trainers.cv.image_detection_damoyolo_trainer"}, "('TRAINERS', 'default', 'face-detection-scrfd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/face_detection_scrfd_trainer.py", "imports": ["typing", "copy", "os", "time"], "module": "modelscope.trainers.cv.face_detection_scrfd_trainer"}, "('TRAINERS', 'default', 'image-classification')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_classifition_trainer.py", "imports": ["numpy", "copy", "os", "time", "typing", "torch"], "module": "modelscope.trainers.cv.image_classifition_trainer"}, "('TRAINERS', 'default', 'ocr-detection-db')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/ocr_detection_db_trainer.py", "imports": ["tqdm", "numpy", "copy", "math", "time", "os", "typing", "torch", "easydict", "datetime"], "module": "modelscope.trainers.cv.ocr_detection_db_trainer"}, "('TRAINERS', 'default', 'image-instance-segmentation')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_instance_segmentation_trainer.py", "imports": [], "module": "modelscope.trainers.cv.image_instance_segmentation_trainer"}, "('TRAINERS', 'default', 'action-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/action_detection_trainer.py", "imports": ["os", "fvcore", "detectron2", "typing", "torch"], "module": "modelscope.trainers.cv.action_detection_trainer"}, "('TRAINERS', 'default', 'nerf-recon-acc')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/nerf_recon_acc_trainer.py", "imports": ["tqdm", "numpy", "cv2", "time", "os", "random", "typing", "glob", "torch", "datetime"], "module": "modelscope.trainers.cv.nerf_recon_acc_trainer"}, "('TRAINERS', 'default', 'movie-scene-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/movie_scene_segmentation_trainer.py", "imports": [], "module": "modelscope.trainers.cv.movie_scene_segmentation_trainer"}, "('TRAINERS', 'default', 'image-inpainting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_inpainting_trainer.py", "imports": ["collections", "torch", "time"], "module": "modelscope.trainers.cv.image_inpainting_trainer"}, "('TRAINERS', 'default', 'image-fewshot-detection')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_defrcn_fewshot_detection_trainer.py", "imports": ["os", "detectron2", "typing", "torch", "collections"], "module": "modelscope.trainers.cv.image_defrcn_fewshot_detection_trainer"}, "('TRAINERS', 'default', 'vision-efficient-tuning')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/vision_efficient_tuning_trainer.py", "imports": ["typing", "torch"], "module": "modelscope.trainers.cv.vision_efficient_tuning_trainer"}, "('TRAINERS', 'default', 'ocr-recognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/ocr_recognition_trainer.py", "imports": ["collections", "torch", "time"], "module": "modelscope.trainers.cv.ocr_recognition_trainer"}, "('TRAINERS', 'default', 'nlp-base-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp_trainer.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.trainers.nlp_trainer"}, "('TRAINERS', 'default', 'nlp-veco-trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp_trainer.py", "imports": ["numpy", "torch", "typing", "os"], "module": "modelscope.trainers.nlp_trainer"}, "('LR_SCHEDULER', 'default', 'ConstantWarmup')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/lrscheduler/warmup/warmup.py", "imports": [], "module": "modelscope.trainers.lrscheduler.warmup.warmup"}, "('LR_SCHEDULER', 'default', 'LinearWarmup')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/lrscheduler/warmup/warmup.py", "imports": [], "module": "modelscope.trainers.lrscheduler.warmup.warmup"}, "('LR_SCHEDULER', 'default', 'ExponentialWarmup')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/lrscheduler/warmup/warmup.py", "imports": [], "module": "modelscope.trainers.lrscheduler.warmup.warmup"}, "('HOOKS', 'default', 'ApexAMPOptimizerHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/optimizer/apex_optimizer_hook.py", "imports": ["logging", "packaging", "torch"], "module": 
"modelscope.trainers.hooks.optimizer.apex_optimizer_hook"}, "('HOOKS', 'default', 'TorchAMPOptimizerHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/optimizer/torch_optimizer_hook.py", "imports": ["logging"], "module": "modelscope.trainers.hooks.optimizer.torch_optimizer_hook"}, "('HOOKS', 'default', 'OptimizerHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/optimizer/base.py", "imports": ["logging", "torch"], "module": "modelscope.trainers.hooks.optimizer.base"}, "('HOOKS', 'default', 'NoneOptimizerHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/optimizer/base.py", "imports": ["logging", "torch"], "module": "modelscope.trainers.hooks.optimizer.base"}, "('HOOKS', 'default', 'LrSchedulerHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/lr_scheduler_hook.py", "imports": [], "module": "modelscope.trainers.hooks.lr_scheduler_hook"}, "('HOOKS', 'default', 'PlateauLrSchedulerHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/lr_scheduler_hook.py", "imports": [], "module": "modelscope.trainers.hooks.lr_scheduler_hook"}, "('HOOKS', 'default', 'NoneLrSchedulerHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/lr_scheduler_hook.py", "imports": [], "module": "modelscope.trainers.hooks.lr_scheduler_hook"}, "('HOOKS', 'default', 'CheckpointHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py", "imports": ["numpy", "os", "shutil", "random", "typing", "torch", "json"], "module": "modelscope.trainers.hooks.checkpoint.checkpoint_hook"}, "('HOOKS', 'default', 'BestCkptSaverHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py", "imports": ["numpy", "os", "shutil", "random", "typing", "torch", "json"], "module": "modelscope.trainers.hooks.checkpoint.checkpoint_hook"}, "('HOOKS', 'default', 'LoadCheckpointHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/checkpoint/load_checkpoint_hook.py", "imports": ["numpy", "packaging", "random", "typing", "torch"], "module": "modelscope.trainers.hooks.checkpoint.load_checkpoint_hook"}, "('HOOKS', 'default', 'EvaluationHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/evaluation_hook.py", "imports": ["collections", "typing"], "module": "modelscope.trainers.hooks.evaluation_hook"}, "('HOOKS', 'default', 'EarlyStopHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/early_stop_hook.py", "imports": ["numpy"], "module": "modelscope.trainers.hooks.early_stop_hook"}, "('HOOKS', 'default', 'IterTimerHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/iter_timer_hook.py", "imports": ["time"], "module": "modelscope.trainers.hooks.iter_timer_hook"}, "('HOOKS', 'default', 'ClipClampLogitScaleHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/clip_clamp_logit_scale_hook.py", "imports": ["torch"], "module": "modelscope.trainers.hooks.clip_clamp_logit_scale_hook"}, "('HOOKS', 'default', 'TextLoggerHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/logger/text_logger_hook.py", "imports": ["os", "torch", "collections", "json", "datetime"], "module": "modelscope.trainers.hooks.logger.text_logger_hook"}, 
"('HOOKS', 'default', 'TensorboardHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/logger/tensorboard_hook.py", "imports": ["numpy", "torch", "os"], "module": "modelscope.trainers.hooks.logger.tensorboard_hook"}, "('HOOKS', 'default', 'DeepspeedHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/distributed/deepspeed_hook.py", "imports": ["math", "os", "shutil", "functools", "deepspeed", "transformers", "torch", "megatron_util"], "module": "modelscope.trainers.hooks.distributed.deepspeed_hook"}, "('HOOKS', 'default', 'MegatronHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/distributed/megatron_hook.py", "imports": ["shutil", "torch", "megatron_util", "os"], "module": "modelscope.trainers.hooks.distributed.megatron_hook"}, "('HOOKS', 'default', 'DDPHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/distributed/ddp_hook.py", "imports": [], "module": "modelscope.trainers.hooks.distributed.ddp_hook"}, "('HOOKS', 'default', 'SparsityHook')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/compression/sparsity_hook.py", "imports": ["os"], "module": "modelscope.trainers.hooks.compression.sparsity_hook"}, "('PARALLEL', 'default', 'DistributedDataParallel')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/parallel/builder.py", "imports": ["torch"], "module": "modelscope.trainers.parallel.builder"}, "('TRAINERS', 'default', 'dummy')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/base.py", "imports": ["abc", "os", "typing", "time"], "module": "modelscope.trainers.base"}, "('TRAINERS', 'default', 'trainer')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/trainer.py", "imports": ["distutils", "copy", "os", "functools", "typing", "torch", "collections", "json", "inspect"], "module": "modelscope.trainers.trainer"}, "('CUSTOM_DATASETS', 'image-quality-assessment-degradation', 'image-quality-assessment-degradation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_quality_assessment_degradation/image_quality_assessment_degradation_dataset.py", "imports": ["torchvision"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.image_quality_assessment_degradation.image_quality_assessment_degradation_dataset"}, "('CUSTOM_DATASETS', 'text-ranking', 'mgeo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/mgeo_ranking_dataset.py", "imports": ["typing", "json", "torch", "random"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.mgeo_ranking_dataset"}, "('CUSTOM_DATASETS', 'image-quality-assessment-mos', 'image-quality-assessment-mos')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_quality_assmessment_mos/image_quality_assessment_mos_dataset.py", "imports": [], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.image_quality_assmessment_mos.image_quality_assessment_mos_dataset"}, "('CUSTOM_DATASETS', 'video-super-resolution', 'real-basicvsr')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/video_super_resolution/video_super_resolution_dataset.py", "imports": ["collections", "numpy", "torch", "cv2"], "module": 
"modelscope.msdatasets.dataset_cls.custom_datasets.video_super_resolution.video_super_resolution_dataset"}, "('CUSTOM_DATASETS', 'image-segmentation', 'cascade_mask_rcnn_swin')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_instance_segmentation_coco_dataset.py", "imports": ["numpy", "pycocotools", "os"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.image_instance_segmentation_coco_dataset"}, "('CUSTOM_DATASETS', 'movie-scene-segmentation', 'resnet50-bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/movie_scene_segmentation/movie_scene_segmentation_dataset.py", "imports": ["copy", "os", "random", "torch", "json", "torchvision"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.movie_scene_segmentation.movie_scene_segmentation_dataset"}, "('CUSTOM_DATASETS', 'image-inpainting', 'FFTInpainting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_inpainting/image_inpainting_dataset.py", "imports": ["numpy", "cv2", "os", "albumentations", "glob", "enum"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.image_inpainting.image_inpainting_dataset"}, "('CUSTOM_DATASETS', 'referring-video-object-segmentation', 'swinT-referring-video-object-segmentation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/referring_video_object_segmentation/referring_video_object_segmentation_dataset.py", "imports": ["tqdm", "numpy", "pycocotools", "os", "h5py", "glob", "torch", "pandas", "json", "torchvision"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.referring_video_object_segmentation.referring_video_object_segmentation_dataset"}, "('CUSTOM_DATASETS', 'language-guided-video-summarization', 'clip-it-language-guided-video-summarization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/language_guided_video_summarization_dataset.py", "imports": ["numpy", "os", "h5py", "torch", "json"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.language_guided_video_summarization_dataset"}, "('CUSTOM_DATASETS', 'bad-image-detecting', 'bad-image-detecting')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/bad_image_detecting/bad_image_detecting_dataset.py", "imports": [], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.bad_image_detecting.bad_image_detecting_dataset"}, "('CUSTOM_DATASETS', 'image-deblurring', 'RedsDataset')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/reds_image_deblurring_dataset.py", "imports": ["numpy", "cv2"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.reds_image_deblurring_dataset"}, "('CUSTOM_DATASETS', 'image-colorization', 'ddcolor')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_colorization/image_colorization_dataset.py", "imports": ["numpy", "torch", "cv2"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.image_colorization.image_colorization_dataset"}, "('CUSTOM_DATASETS', 'image-denoising', 'SiddDataset')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/sidd_image_denoising/sidd_image_denoising_dataset.py", "imports": ["numpy", "cv2"], "module": 
"modelscope.msdatasets.dataset_cls.custom_datasets.sidd_image_denoising.sidd_image_denoising_dataset"}, "('CUSTOM_DATASETS', 'video-frame-interpolation', 'video-frame-interpolation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/video_frame_interpolation/video_frame_interpolation_dataset.py", "imports": ["numpy", "torch", "cv2"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.video_frame_interpolation.video_frame_interpolation_dataset"}, "('CUSTOM_DATASETS', 'ocr-recognition', 'OCRRecognition')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_recognition_dataset.py", "imports": ["PIL", "numpy", "cv2", "os", "six", "lmdb", "torch", "json"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_recognition_dataset"}, "('CUSTOM_DATASETS', 'video-stabilization', 'video-stabilization')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/video_stabilization/video_stabilization_dataset.py", "imports": [], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.video_stabilization.video_stabilization_dataset"}, "('CUSTOM_DATASETS', 'image-portrait-enhancement', 'PairedDataset')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_portrait_enhancement/image_portrait_enhancement_dataset.py", "imports": ["numpy", "cv2"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.image_portrait_enhancement.image_portrait_enhancement_dataset"}, "('CUSTOM_DATASETS', 'nli', 'veco')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/veco_dataset.py", "imports": ["numpy", "datasets", "typing"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.veco_dataset"}, "('CUSTOM_DATASETS', 'text-ranking', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/text_ranking_dataset.py", "imports": ["typing", "torch", "random"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.text_ranking_dataset"}, "('CUSTOM_DATASETS', 'sentence-embedding', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/text_ranking_dataset.py", "imports": ["typing", "torch", "random"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.text_ranking_dataset"}, "('CUSTOM_DATASETS', 'image-deblurring', 'GoproDataset')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/gopro_image_deblurring_dataset.py", "imports": ["numpy", "cv2"], "module": "modelscope.msdatasets.dataset_cls.custom_datasets.gopro_image_deblurring_dataset"}, "('EXPORTERS', 'transformer-crf', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/model_for_token_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.model_for_token_classification_exporter"}, "('EXPORTERS', 'token-classification', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/model_for_token_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.model_for_token_classification_exporter"}, "('EXPORTERS', 'named-entity-recognition', 'transformer-crf')": {"filepath": 
"/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/model_for_token_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.model_for_token_classification_exporter"}, "('EXPORTERS', 'part-of-speech', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/model_for_token_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.model_for_token_classification_exporter"}, "('EXPORTERS', 'word-segmentation', 'transformer-crf')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/model_for_token_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.model_for_token_classification_exporter"}, "('EXPORTERS', 'text-classification', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_sequence_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.sbert_for_sequence_classification_exporter"}, "('EXPORTERS', 'text-classification', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_sequence_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.sbert_for_sequence_classification_exporter"}, "('EXPORTERS', 'sentence-similarity', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_sequence_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.sbert_for_sequence_classification_exporter"}, "('EXPORTERS', 'sentiment-classification', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_sequence_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.sbert_for_sequence_classification_exporter"}, "('EXPORTERS', 'nli', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_sequence_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.sbert_for_sequence_classification_exporter"}, "('EXPORTERS', 'sentence-similarity', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_sequence_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.sbert_for_sequence_classification_exporter"}, "('EXPORTERS', 'sentiment-classification', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_sequence_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.sbert_for_sequence_classification_exporter"}, "('EXPORTERS', 'nli', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_sequence_classification_exporter.py", "imports": ["collections", "typing", "torch"], "module": "modelscope.exporters.nlp.sbert_for_sequence_classification_exporter"}, "('EXPORTERS', 'translation', 'csanmt-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/csanmt_for_translation_exporter.py", "imports": ["typing", "tensorflow", "os"], "module": 
"modelscope.exporters.nlp.csanmt_for_translation_exporter"}, "('EXPORTERS', 'zero-shot-classification', 'bert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_zero_shot_classification_exporter.py", "imports": ["collections", "typing"], "module": "modelscope.exporters.nlp.sbert_for_zero_shot_classification_exporter"}, "('EXPORTERS', 'zero-shot-classification', 'structbert')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_zero_shot_classification_exporter.py", "imports": ["collections", "typing"], "module": "modelscope.exporters.nlp.sbert_for_zero_shot_classification_exporter"}, "('EXPORTERS', 'text-to-image-synthesis', 'stable-diffusion')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/multi_modal/stable_diffusion_exporter.py", "imports": ["onnx", "argparse", "shutil", "os", "packaging", "typing", "pathlib", "torch", "diffusers", "collections"], "module": "modelscope.exporters.multi_modal.stable_diffusion_exporter"}, "('EXPORTERS', 'acoustic-noise-suppression', 'speech_dfsmn_ans')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/audio/ans_dfsmn_exporter.py", "imports": ["torch", "os"], "module": "modelscope.exporters.audio.ans_dfsmn_exporter"}, "('EXPORTERS', 'domain-specific-object-detection', 'tinynas-damoyolo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/cv/object_detection_damoyolo_exporter.py", "imports": ["numpy", "os", "onnx", "functools", "typing", "torch"], "module": "modelscope.exporters.cv.object_detection_damoyolo_exporter"}, "('EXPORTERS', 'image-object-detection', 'tinynas-damoyolo')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/cv/object_detection_damoyolo_exporter.py", "imports": ["numpy", "os", "onnx", "functools", "typing", "torch"], "module": "modelscope.exporters.cv.object_detection_damoyolo_exporter"}, "('EXPORTERS', 'default', 'cartoon-translation')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/cv/cartoon_translation_exporter.py", "imports": ["packaging", "typing", "tensorflow", "os"], "module": "modelscope.exporters.cv.cartoon_translation_exporter"}, "('EXPORTERS', 'face-detection', 'scrfd')": {"filepath": "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/cv/face_detection_scrfd_exporter.py", "imports": ["numpy", "os", "onnx", "functools", "typing", "torch"], "module": "modelscope.exporters.cv.face_detection_scrfd_exporter"}}, "requirements": {"modelscope.models.nlp.space.dialog_intent_prediction": ["typing", "os"], "modelscope.models.nlp.space.dialog_state_tracking": ["typing", "transformers", "torch"], "modelscope.models.nlp.space.dialog_modeling": ["typing", "os"], "modelscope.models.nlp.space.model.gen_unified_transformer": ["torch"], "modelscope.models.nlp.space.model.tokenization_space": ["transformers"], "modelscope.models.nlp.space.model.generator": ["numpy", "torch", "math"], "modelscope.models.nlp.space.model.intent_unified_transformer": ["torch"], "modelscope.models.nlp.space.model.model_base": ["torch", "os"], "modelscope.models.nlp.space.model.unified_transformer": ["numpy", "torch"], "modelscope.models.nlp.space.configuration": [], "modelscope.models.nlp.space.modules.functions": ["numpy", "torch"], "modelscope.models.nlp.space.modules.embedder": ["torch"], "modelscope.models.nlp.space.modules.transformer_block": ["torch"], "modelscope.models.nlp.space.modules.multihead_attention": ["torch"], 
"modelscope.models.nlp.space.modules.feedforward": ["torch"], "modelscope.models.nlp.space_T_en.text_to_sql": ["text2sql_lgesql", "typing", "torch", "os"], "modelscope.models.nlp.dgds.document_grounded_dialog_retrieval": ["typing", "torch", "os"], "modelscope.models.nlp.dgds.document_grounded_dialog_rerank": ["typing", "torch", "os"], "modelscope.models.nlp.dgds.document_grounded_dialog_generate": ["typing", "torch", "os"], "modelscope.models.nlp.dgds.backbone": ["__future__", "transformers", "torch", "os"], "modelscope.models.nlp.peer.text_classification": ["copy", "torch"], "modelscope.models.nlp.peer.sas_utils": ["numpy", "nltk", "torch", "random"], "modelscope.models.nlp.peer.backbone": ["math", "dataclasses", "typing", "transformers", "torch"], "modelscope.models.nlp.peer.configuration": ["transformers"], "modelscope.models.nlp.ponet.fill_mask": ["transformers", "torch"], "modelscope.models.nlp.ponet.backbone": ["distutils", "math", "packaging", "transformers", "torch"], "modelscope.models.nlp.ponet.configuration": ["transformers"], "modelscope.models.nlp.ponet.document_segmentation": ["typing", "torch"], "modelscope.models.nlp.ponet.tokenization": ["typing", "transformers"], "modelscope.models.nlp.fid_T5.text_generation": ["io", "transformers", "torch", "os"], "modelscope.models.nlp.canmt.sequence_generator": ["numpy", "sys", "typing", "torch", "math", "fairseq"], "modelscope.models.nlp.canmt.canmt_translation": ["numpy", "math", "os", "typing", "torch"], "modelscope.models.nlp.canmt.canmt_model": ["numpy", "typing", "torch", "math", "fairseq"], "modelscope.models.nlp.chatglm2.quantization": ["bz2", "typing", "cpm_kernels", "torch", "ctypes", "base64"], "modelscope.models.nlp.chatglm2.configuration": ["transformers"], "modelscope.models.nlp.chatglm2.text_generation": ["copy", "math", "sys", "typing", "transformers", "torch", "warnings"], "modelscope.models.nlp.chatglm2.tokenization": ["typing", "sentencepiece", "transformers", "os"], "modelscope.models.nlp.plug_mental.text_classification": ["torch"], "modelscope.models.nlp.plug_mental.adv_utils": ["torch"], "modelscope.models.nlp.plug_mental.backbone": ["math", "packaging", "dataclasses", "transformers", "typing", "torch"], "modelscope.models.nlp.plug_mental.configuration": ["transformers"], "modelscope.models.nlp.hf_transformers.backbone": ["transformers"], "modelscope.models.nlp.csanmt.translation": ["collections", "typing", "tensorflow", "math"], "modelscope.models.nlp.veco.text_classification": ["transformers"], "modelscope.models.nlp.veco.fill_mask": ["transformers"], "modelscope.models.nlp.veco.backbone": ["transformers"], "modelscope.models.nlp.veco.configuration": ["transformers"], "modelscope.models.nlp.veco.token_classification": ["transformers", "torch"], "modelscope.models.nlp.llama.tokenization_fast": ["shutil", "typing", "transformers", "os"], "modelscope.models.nlp.llama.backbone": ["typing", "transformers", "torch", "math"], "modelscope.models.nlp.llama.convert_llama_weights_to_hf": ["math", "os", "argparse", "shutil", "gc", "torch", "json"], "modelscope.models.nlp.llama.configuration": ["transformers"], "modelscope.models.nlp.llama.text_generation": ["typing", "torch"], "modelscope.models.nlp.llama.tokenization": ["os", "shutil", "sentencepiece", "typing", "transformers"], "modelscope.models.nlp.llama2.tokenization_fast": ["os", "shutil", "tokenizers", "typing", "transformers"], "modelscope.models.nlp.llama2.backbone": ["typing", "transformers", "torch", "math"], "modelscope.models.nlp.llama2.configuration": 
["transformers"], "modelscope.models.nlp.llama2.text_generation": ["typing", "transformers", "torch"], "modelscope.models.nlp.llama2.tokenization": ["os", "shutil", "sentencepiece", "typing", "transformers"], "modelscope.models.nlp.polylm.text_generation": ["collections", "typing", "transformers", "torch"], "modelscope.models.nlp.codegeex.tokenizer": ["typing", "transformers", "torch"], "modelscope.models.nlp.codegeex.codegeex_for_code_translation": ["typing", "copy", "torch"], "modelscope.models.nlp.codegeex.codegeex_for_code_generation": ["typing", "copy", "torch"], "modelscope.models.nlp.codegeex.codegeex": ["torch", "math"], "modelscope.models.nlp.codegeex.inference": ["typing", "torch"], "modelscope.models.nlp.plug.generator": ["torch"], "modelscope.models.nlp.plug.AnnealingLR": ["torch", "math"], "modelscope.models.nlp.plug.backbone": ["logging", "__future__", "math", "torch", "megatron_util"], "modelscope.models.nlp.plug.configuration": ["transformers", "copy", "json"], "modelscope.models.nlp.plug.distributed_plug": ["typing", "torch", "megatron_util"], "modelscope.models.nlp.glm_130b.quantization.functional": ["torch"], "modelscope.models.nlp.glm_130b.quantization.layers": ["torch", "SwissArmyTransformer"], "modelscope.models.nlp.glm_130b.initialize": ["argparse", "torch", "SwissArmyTransformer", "time"], "modelscope.models.nlp.glm_130b.text_generation": ["copy", "os", "time", "functools", "stat", "re", "random", "sys", "typing", "torch", "SwissArmyTransformer"], "modelscope.models.nlp.glm_130b.generation.strategies": ["numpy", "torch", "SwissArmyTransformer"], "modelscope.models.nlp.bloom.backbone": ["transformers"], "modelscope.models.nlp.T5.backbone": ["copy", "math", "os", "typing", "transformers", "torch", "warnings"], "modelscope.models.nlp.T5.configuration": ["typing", "transformers"], "modelscope.models.nlp.T5.text2text_generation": ["copy", "typing", "transformers", "torch", "warnings"], "modelscope.models.nlp.chatglm.quantization": ["bz2", "typing", "cpm_kernels", "torch", "ctypes", "base64"], "modelscope.models.nlp.chatglm.configuration": ["transformers"], "modelscope.models.nlp.chatglm.text_generation": ["copy", "math", "os", "re", "sys", "typing", "transformers", "torch", "warnings"], "modelscope.models.nlp.chatglm.tokenization": ["numpy", "os", "sentencepiece", "transformers", "typing"], "modelscope.models.nlp.structbert.text_classification": ["torch"], "modelscope.models.nlp.structbert.faq_question_answering": ["math", "os", "typing", "torch", "collections"], "modelscope.models.nlp.structbert.adv_utils": ["torch"], "modelscope.models.nlp.structbert.fill_mask": ["transformers", "torch"], "modelscope.models.nlp.structbert.backbone": ["math", "packaging", "dataclasses", "transformers", "typing", "torch"], "modelscope.models.nlp.structbert.configuration": ["transformers"], "modelscope.models.nlp.structbert.token_classification": ["torch"], "modelscope.models.nlp.space_T_cn.table_question_answering": ["numpy", "os", "transformers", "typing", "torch"], "modelscope.models.nlp.space_T_cn.backbone": ["numpy", "copy", "__future__", "tempfile", "math", "os", "shutil", "tarfile", "torch"], "modelscope.models.nlp.space_T_cn.configuration": ["logging", "__future__", "copy", "json"], "modelscope.models.nlp.heads.text_ranking_head": ["typing", "torch"], "modelscope.models.nlp.heads.text_generation_head": ["typing", "torch"], "modelscope.models.nlp.heads.torch_pretrain_head": ["typing", "transformers", "torch"], "modelscope.models.nlp.heads.fill_mask_head": ["typing", "transformers", 
"torch"], "modelscope.models.nlp.heads.crf_head": ["typing", "transformers", "torch"], "modelscope.models.nlp.heads.token_classification_head": ["typing", "torch"], "modelscope.models.nlp.heads.infromation_extraction_head": ["torch"], "modelscope.models.nlp.heads.text_classification_head": ["typing", "torch"], "modelscope.models.nlp.fid_plug.backbone": ["numpy", "copy", "math", "os", "dataclasses", "typing", "transformers", "torch"], "modelscope.models.nlp.fid_plug.configuration": ["transformers"], "modelscope.models.nlp.fid_plug.text_generation": ["io", "transformers", "torch", "os"], "modelscope.models.nlp.bart.text_error_correction": ["typing", "torch", "os"], "modelscope.models.nlp.gpt3.distributed_gpt3": ["math", "os", "typing", "transformers", "torch", "megatron_util", "collections"], "modelscope.models.nlp.gpt3.tokenizer": ["typing", "tokenizers"], "modelscope.models.nlp.gpt3.backbone": ["math", "os", "addict", "typing", "transformers", "torch"], "modelscope.models.nlp.gpt3.configuration": ["transformers", "torch"], "modelscope.models.nlp.gpt3.text_generation": ["collections", "typing", "transformers", "torch"], "modelscope.models.nlp.deberta_v2.fill_mask": ["typing", "transformers", "torch"], "modelscope.models.nlp.deberta_v2.tokenization_fast": ["shutil", "typing", "transformers", "os"], "modelscope.models.nlp.deberta_v2.backbone": ["collections", "typing", "transformers", "torch"], "modelscope.models.nlp.deberta_v2.configuration": ["transformers"], "modelscope.models.nlp.deberta_v2.tokenization": ["typing", "transformers", "sentencepiece", "os", "unicodedata"], "modelscope.models.nlp.bert.text_classification": [], "modelscope.models.nlp.bert.text_ranking": [], "modelscope.models.nlp.bert.fill_mask": [], "modelscope.models.nlp.bert.backbone": ["packaging", "transformers", "torch", "math"], "modelscope.models.nlp.bert.configuration": ["collections", "typing", "transformers"], "modelscope.models.nlp.bert.document_segmentation": ["typing", "torch"], "modelscope.models.nlp.bert.sentence_embedding": ["torch"], "modelscope.models.nlp.bert.word_alignment": ["torch"], "modelscope.models.nlp.bert.siamese_uie": ["copy", "torch"], "modelscope.models.nlp.bert.token_classification": [], "modelscope.models.nlp.gpt_neo.backbone": ["transformers"], "modelscope.models.nlp.gpt2.backbone": ["transformers"], "modelscope.models.nlp.palm_v2.configuration": ["transformers"], "modelscope.models.nlp.palm_v2.text_generation": ["numpy", "copy", "math", "os", "subprocess", "dataclasses", "typing", "transformers", "torch", "codecs", "json"], "modelscope.models.nlp.palm_v2.dureader_eval": ["numpy", "copy", "math", "argparse", "re", "rouge", "sys", "json", "collections", "zipfile"], "modelscope.models.nlp.megatron_bert.fill_mask": ["transformers", "torch"], "modelscope.models.nlp.megatron_bert.backbone": ["transformers", "torch", "math"], "modelscope.models.nlp.megatron_bert.configuration": ["collections", "typing", "transformers"], "modelscope.models.nlp.lstm.backbone": ["torch"], "modelscope.models.nlp.lstm.token_classification": [], "modelscope.models.nlp.use.transformer": ["torch", "math"], "modelscope.models.nlp.use.user_satisfaction_estimation": ["numpy", "os", "transformers", "typing", "torch"], "modelscope.models.nlp.gpt_moe.tokenizer": ["tokenizers"], "modelscope.models.nlp.gpt_moe.moe.experts": ["copy", "torch"], "modelscope.models.nlp.gpt_moe.moe.sharded_moe": ["apex", "math", "tutel", "typing", "torch", "megatron_util", "scipy"], "modelscope.models.nlp.gpt_moe.moe.mappings": ["torch", 
"megatron_util"], "modelscope.models.nlp.gpt_moe.moe.utils": ["typing", "torch"], "modelscope.models.nlp.gpt_moe.moe.layer": ["typing", "torch", "megatron_util"], "modelscope.models.nlp.gpt_moe.distributed_gpt_moe": ["megatron_util", "transformers", "torch", "math"], "modelscope.models.nlp.gpt_moe.checkpointing": ["torch", "megatron_util", "os"], "modelscope.models.nlp.gpt_moe.backbone": ["math", "os", "addict", "typing", "transformers", "torch"], "modelscope.models.nlp.gpt_moe.configuration": ["transformers", "torch"], "modelscope.models.nlp.gpt_moe.text_generation": ["typing", "transformers"], "modelscope.models.nlp.mglm.train_utils": ["apex", "deepspeed", "torch", "megatron_util"], "modelscope.models.nlp.mglm.test.test_rel_shift": ["numpy", "matplotlib", "learning_rates", "torch"], "modelscope.models.nlp.mglm.test.test_block": ["argparse", "blocklm_utils", "numpy", "random"], "modelscope.models.nlp.mglm.tasks.seq2seq.finetune": ["pretrain_glm", "torch", "megatron_util", "finetune_glm", "tasks", "collections", "functools"], "modelscope.models.nlp.mglm.tasks.seq2seq.evaluate": ["generation_utils", "string", "torch", "megatron_util", "rouge_score", "datetime", "random"], "modelscope.models.nlp.mglm.tasks.seq2seq.dataset": ["tqdm", "numpy", "os", "tasks", "random", "torch", "json", "utils", "data_utils"], "modelscope.models.nlp.mglm.tasks.eval_utils": ["time", "os", "finetune_glm", "tasks", "sklearn", "random", "typing", "torch", "megatron_util", "collections", "utils", "datetime"], "modelscope.models.nlp.mglm.tasks.superglue.finetune": ["tasks", "collections", "finetune_glm"], "modelscope.models.nlp.mglm.tasks.superglue.evaluate": ["__future__", "typing", "string", "tasks", "collections", "functools", "re"], "modelscope.models.nlp.mglm.tasks.superglue.pvp": ["numpy", "copy", "string", "math", "tasks", "abc", "random", "typing", "collections", "utils"], "modelscope.models.nlp.mglm.tasks.superglue.dataset": ["copy", "os", "pandas", "re", "random", "glob", "csv", "json", "utils", "tqdm", "numpy", "abc", "typing", "torch", "collections", "data_utils"], "modelscope.models.nlp.mglm.tasks.language_model.detokenizer": ["re"], "modelscope.models.nlp.mglm.tasks.language_model.finetune": ["pretrain_glm", "torch", "math", "finetune_glm", "tasks", "megatron_util", "functools"], "modelscope.models.nlp.mglm.tasks.language_model.dataset": ["numpy", "math", "itertools", "tasks", "bisect", "torch", "json", "utils"], "modelscope.models.nlp.mglm.tasks.data_utils": ["numpy", "copy", "re", "typing", "pickle", "torch", "megatron_util", "json"], "modelscope.models.nlp.mglm.generation_utils": ["collections", "typing", "abc", "torch"], "modelscope.models.nlp.mglm.mglm_for_text_summarization": ["numpy", "os", "random", "typing", "torch", "megatron_util"], "modelscope.models.nlp.mglm.process_grid": ["statistics", "sys", "glob", "os", "json"], "modelscope.models.nlp.mglm.data_utils.file_utils": ["logging", "__future__", "tempfile", "os", "requests", "pathlib", "urllib", "io", "json", "tqdm", "shutil", "functools", "botocore", "hashlib", "boto3", "sys"], "modelscope.models.nlp.mglm.data_utils.datasets": ["math", "os", "bisect", "random", "csv", "json", "tqdm", "numpy", "operator", "nltk", "time", "itertools", "torch", "pandas"], "modelscope.models.nlp.mglm.data_utils.tokenization_gpt2": ["logging", "__future__", "regex", "os", "functools", "sys", "io", "json"], "modelscope.models.nlp.mglm.data_utils.extraction": ["json", "glob", "nltk", "os"], "modelscope.models.nlp.mglm.data_utils.wordpiece": ["logging", "__future__", 
"os", "unicodedata", "io", "collections"], "modelscope.models.nlp.mglm.data_utils.lazy_loader": ["numpy", "pickle", "itertools", "time", "mmap", "torch", "os"], "modelscope.models.nlp.mglm.data_utils.sp_tokenizer": ["os"], "modelscope.models.nlp.mglm.data_utils.corpora": ["tqdm", "multiprocessing", "os", "random", "torch", "collections", "queue", "json"], "modelscope.models.nlp.mglm.data_utils.samplers": ["numpy", "sys", "torch", "math", "os"], "modelscope.models.nlp.mglm.data_utils.tokenization": ["regex", "nltk", "os", "itertools", "sentencepiece", "random", "torch", "csv", "collections"], "modelscope.models.nlp.mglm.utils": ["numpy", "time", "os", "subprocess", "random", "torch", "megatron_util", "json"], "modelscope.models.nlp.mglm.blocklm_utils": ["numpy", "copy", "torch", "math", "megatron_util", "scipy", "random"], "modelscope.models.nlp.mglm.model.modeling_glm": ["torch", "megatron_util"], "modelscope.models.nlp.mglm.model.distributed": ["torch", "megatron_util"], "modelscope.models.nlp.mglm.model.prompt": ["torch", "random"], "modelscope.models.nlp.mglm.model.downstream": ["torch"], "modelscope.models.nlp.mglm.model.modeling_bert": ["logging", "__future__", "copy", "apex", "tempfile", "math", "os", "shutil", "tarfile", "torch", "megatron_util", "json", "data_utils"], "modelscope.models.nlp.mglm.model.transformer": ["apex", "torch", "math", "megatron_util", "deepspeed"], "modelscope.models.nlp.mglm.run_test": ["test", "sys"], "modelscope.models.nlp.mglm.arguments": ["os", "argparse", "deepspeed", "torch", "json"], "modelscope.models.nlp.mglm.configure_data": ["numpy", "copy", "itertools", "os", "bisect", "random", "torch", "megatron_util"], "modelscope.models.nlp.task_models.information_extraction": ["numpy", "typing"], "modelscope.models.nlp.task_models.text_classification": ["numpy", "typing"], "modelscope.models.nlp.task_models.text_ranking": ["numpy", "typing"], "modelscope.models.nlp.task_models.fill_mask": ["numpy", "torch", "typing"], "modelscope.models.nlp.task_models.machine_reading_comprehension": ["os", "dataclasses", "typing", "transformers", "torch"], "modelscope.models.nlp.task_models.text_generation": ["numpy", "transformers", "torch", "typing"], "modelscope.models.nlp.task_models.task_model": ["os", "abc", "re", "typing", "torch", "collections"], "modelscope.models.nlp.task_models.feature_extraction": ["numpy", "typing"], "modelscope.models.nlp.task_models.token_classification": ["typing", "torch"], "modelscope.models.nlp.xlm_roberta.backbone": ["packaging", "transformers", "torch", "math"], "modelscope.models.nlp.xlm_roberta.configuration": ["collections", "typing", "transformers"], "modelscope.models.nlp.qwen.backbone": ["importlib", "einops", "math", "typing", "transformers", "flash_attn", "torch"], "modelscope.models.nlp.qwen.configuration": ["typing", "transformers"], "modelscope.models.nlp.qwen.text_generation": ["typing", "transformers", "torch", "warnings"], "modelscope.models.nlp.qwen.qwen_generation_utils": ["numpy", "transformers", "torch", "typing"], "modelscope.models.nlp.qwen.tokenization": ["logging", "__future__", "os", "unicodedata", "typing", "transformers", "tiktoken", "io", "json", "base64"], "modelscope.models.nlp.unite.translation_evaluation": ["numpy", "math", "packaging", "dataclasses", "typing", "transformers", "torch", "warnings"], "modelscope.models.nlp.unite.configuration": ["enum"], "modelscope.models.multi_modal.diffusion.unet_generator": ["torch", "math"], "modelscope.models.multi_modal.diffusion.tokenizer": ["collections", 
"__future__", "unicodedata", "six"], "modelscope.models.multi_modal.diffusion.model": ["numpy", "os", "typing", "torch", "json"], "modelscope.models.multi_modal.diffusion.unet_upsampler_1024": ["torch", "math"], "modelscope.models.multi_modal.diffusion.diffusion": ["torch", "math"], "modelscope.models.multi_modal.diffusion.structbert": ["numpy", "copy", "__future__", "torch", "math", "json", "six"], "modelscope.models.multi_modal.diffusion.unet_upsampler_256": ["functools", "torch", "math"], "modelscope.models.multi_modal.mplug_owl.configuration_mplug_owl": ["typing", "copy", "transformers", "os"], "modelscope.models.multi_modal.mplug_owl.modeling_mplug_owl": ["logging", "copy", "math", "os", "random", "dataclasses", "transformers", "typing", "torch", "io"], "modelscope.models.multi_modal.video_synthesis.unet_sd": ["torch", "einops", "math"], "modelscope.models.multi_modal.video_synthesis.diffusion": ["torch"], "modelscope.models.multi_modal.video_synthesis.text_to_video_synthesis_model": ["einops", "os", "open_clip", "typing", "torch"], "modelscope.models.multi_modal.video_synthesis.autoencoder": ["numpy", "torch"], "modelscope.models.multi_modal.mplug.predictor": ["__future__", "torch"], "modelscope.models.multi_modal.mplug.modeling_mplug": ["math", "os", "transformers", "typing", "torch"], "modelscope.models.multi_modal.mplug.configuration_mplug": ["typing", "yaml", "transformers", "os"], "modelscope.models.multi_modal.mplug.mvit": ["timm", "numpy", "torch", "fairscale", "collections", "functools"], "modelscope.models.multi_modal.mplug.clip.clip": ["collections", "typing", "torch"], "modelscope.models.multi_modal.dpm_solver_pytorch": ["torch", "math"], "modelscope.models.multi_modal.videocomposer.videocomposer_model": ["copy", "einops", "os", "open_clip", "typing", "torch", "pynvml"], "modelscope.models.multi_modal.videocomposer.dpm_solver": ["torch", "math"], "modelscope.models.multi_modal.videocomposer.config": ["logging", "torch", "os", "easydict", "datetime"], "modelscope.models.multi_modal.videocomposer.annotator.sketch.sketch_simplification": ["torch", "math", "os"], "modelscope.models.multi_modal.videocomposer.annotator.sketch.pidinet": ["torch", "math", "os"], "modelscope.models.multi_modal.videocomposer.annotator.histogram.palette": ["numpy", "sklearn", "skimage", "os"], "modelscope.models.multi_modal.videocomposer.annotator.util": ["numpy", "cv2", "os"], "modelscope.models.multi_modal.videocomposer.ops.random_mask": ["numpy", "cv2"], "modelscope.models.multi_modal.videocomposer.ops.distributed": ["numpy", "torch", "pickle", "collections", "functools"], "modelscope.models.multi_modal.videocomposer.ops.losses": ["torch", "math"], "modelscope.models.multi_modal.videocomposer.ops.utils": ["logging", "skvideo", "copy", "einops", "math", "os", "imageio", "requests", "glob", "pickle", "urllib", "io", "json", "gzip", "oss2", "PIL", "numpy", "multiprocessing", "time", "hashlib", "sys", "torch", "binascii", "zipfile", "base64", "torchvision"], "modelscope.models.multi_modal.videocomposer.ops.degration": ["numpy", "math", "os", "random", "torch", "scipy", "datetime", "torchvision"], "modelscope.models.multi_modal.videocomposer.utils.distributed": ["logging", "functools", "torch", "pickle"], "modelscope.models.multi_modal.videocomposer.utils.config": ["copy", "os", "argparse", "yaml", "json"], "modelscope.models.multi_modal.videocomposer.utils.utils": ["logging", "skvideo", "copy", "einops", "math", "os", "imageio", "requests", "random", "glob", "pickle", "urllib", "io", "json", "gzip", 
"oss2", "PIL", "numpy", "multiprocessing", "time", "hashlib", "sys", "torch", "binascii", "zipfile", "base64", "torchvision"], "modelscope.models.multi_modal.videocomposer.unet_sd": ["einops", "math", "os", "functools", "torch", "fairscale", "rotary_embedding_torch", "config"], "modelscope.models.multi_modal.videocomposer.mha_flash": ["numpy", "flash_attn", "torch", "math", "time", "os", "random"], "modelscope.models.multi_modal.videocomposer.diffusion": ["torch", "math"], "modelscope.models.multi_modal.videocomposer.models.clip": ["torch", "math", "os"], "modelscope.models.multi_modal.videocomposer.models.midas": ["torch", "math", "os"], "modelscope.models.multi_modal.videocomposer.clip": ["open_clip", "numpy", "torch", "torchvision"], "modelscope.models.multi_modal.videocomposer.data.tokenizers": ["html", "regex", "os", "functools", "ftfy", "tokenizers", "torch", "gzip"], "modelscope.models.multi_modal.videocomposer.data.transforms": ["PIL", "numpy", "torch", "math", "random", "torchvision"], "modelscope.models.multi_modal.videocomposer.data.samplers": ["numpy", "json", "torch", "os"], "modelscope.models.multi_modal.videocomposer.autoencoder": ["numpy", "torch"], "modelscope.models.multi_modal.soonet.tokenizer": ["html", "regex", "torch", "functools", "gzip", "ftfy"], "modelscope.models.multi_modal.soonet.swin_transformer": ["numpy", "torch"], "modelscope.models.multi_modal.soonet.model": ["torch", "os"], "modelscope.models.multi_modal.soonet.blocks": ["torch", "math"], "modelscope.models.multi_modal.soonet.utils": ["tqdm", "numpy", "copy", "decord"], "modelscope.models.multi_modal.soonet.clip": ["numpy", "typing", "torch", "warnings", "collections"], "modelscope.models.multi_modal.stable_diffusion.stable_diffusion": ["os", "functools", "packaging", "typing", "transformers", "torch", "diffusers"], "modelscope.models.multi_modal.ofa_for_all_tasks": ["string", "math", "os", "functools", "re", "typing", "torch", "json"], "modelscope.models.multi_modal.gemm.tokenizer": ["html", "regex", "torch", "os", "functools", "gzip", "ftfy"], "modelscope.models.multi_modal.gemm.gemm_base": ["numpy", "os", "typing", "torch", "collections", "json"], "modelscope.models.multi_modal.gemm.gemm_model": ["PIL", "numpy", "os", "typing", "torch", "json", "torchvision"], "modelscope.models.multi_modal.image_to_video.utils.config": ["logging", "torch", "os", "easydict", "datetime"], "modelscope.models.multi_modal.image_to_video.utils.shedule": ["torch", "math"], "modelscope.models.multi_modal.image_to_video.utils.diffusion": ["torch", "math"], "modelscope.models.multi_modal.image_to_video.utils.seed": ["numpy", "torch", "random"], "modelscope.models.multi_modal.image_to_video.utils.transforms": ["PIL", "numpy", "torch", "math", "random", "torchvision"], "modelscope.models.multi_modal.image_to_video.image_to_video_model": ["copy", "os", "random", "typing", "torch"], "modelscope.models.multi_modal.image_to_video.modules.embedder": ["numpy", "torch", "os", "open_clip", "torchvision"], "modelscope.models.multi_modal.image_to_video.modules.unet_i2v": ["einops", "math", "os", "torch", "xformers", "rotary_embedding_torch", "fairscale"], "modelscope.models.multi_modal.image_to_video.modules.autoencoder": ["collections", "numpy", "torch"], "modelscope.models.multi_modal.mmr.dataloaders.rawvideo_util": ["PIL", "numpy", "cv2", "torch", "torchvision"], "modelscope.models.multi_modal.mmr.models.module_cross": ["logging", "__future__", "torch", "collections", "json"], "modelscope.models.multi_modal.mmr.models.until_module": 
["logging", "numpy", "torch", "math"], "modelscope.models.multi_modal.mmr.models.modeling": ["os", "torch", "collections", "platform", "types"], "modelscope.models.multi_modal.mmr.models.tokenization_clip": ["html", "regex", "os", "functools", "gzip", "ftfy"], "modelscope.models.multi_modal.mmr.models.module_clip": ["tqdm", "urllib", "os", "hashlib", "typing", "torch", "warnings", "collections"], "modelscope.models.multi_modal.mmr.models.clip_for_mm_video_embedding": ["PIL", "numpy", "tempfile", "os", "random", "typing", "uuid", "torch", "urllib", "json", "decord"], "modelscope.models.multi_modal.mmr.models.dynamic_inverted_softmax": ["numpy"], "modelscope.models.multi_modal.mplug_for_all_tasks": ["typing", "os"], "modelscope.models.multi_modal.multi_stage_diffusion.tokenizer": ["transformers", "html", "regex", "torch", "functools", "gzip", "ftfy"], "modelscope.models.multi_modal.multi_stage_diffusion.model": ["PIL", "numpy", "math", "os", "typing", "torch", "json"], "modelscope.models.multi_modal.multi_stage_diffusion.upsampler": ["torch", "math"], "modelscope.models.multi_modal.multi_stage_diffusion.prior": ["torch", "math"], "modelscope.models.multi_modal.multi_stage_diffusion.xglm": ["torch", "math"], "modelscope.models.multi_modal.multi_stage_diffusion.clip": ["torch", "math"], "modelscope.models.multi_modal.multi_stage_diffusion.decoder": ["torch", "math"], "modelscope.models.multi_modal.multi_stage_diffusion.gaussian_diffusion": ["torch", "math"], "modelscope.models.multi_modal.ofa_for_text_to_image_synthesis_model": ["PIL", "numpy", "taming", "os", "typing", "torch", "pkg_resources", "json", "torchvision"], "modelscope.models.multi_modal.vldoc.modeling_layout_roberta": ["transformers", "torch", "math", "os", "packaging"], "modelscope.models.multi_modal.vldoc.model": ["logging", "copy", "math", "os", "re", "sys", "torch", "json", "torchvision"], "modelscope.models.multi_modal.vldoc.conv_fpn_trans": ["timm", "apex", "random", "torch", "collections"], "modelscope.models.multi_modal.vldoc.processing": ["PIL", "numpy", "timm", "cv2", "typing", "torch", "collections", "torchvision"], "modelscope.models.multi_modal.vldoc.transformer_local": ["copy", "torch"], "modelscope.models.multi_modal.vldoc.convnext": ["timm", "torch", "os"], "modelscope.models.multi_modal.vldoc.tokenization": ["transformers", "os"], "modelscope.models.multi_modal.mgeo.text_classification": ["torch"], "modelscope.models.multi_modal.mgeo.text_ranking": ["torch"], "modelscope.models.multi_modal.mgeo.backbone": ["math", "os", "random", "dataclasses", "transformers", "typing", "torch", "warnings"], "modelscope.models.multi_modal.mgeo.token_classification": ["torch"], "modelscope.models.multi_modal.clip_interrogator.model": ["math", "os", "requests", "safetensors", "PIL", "numpy", "tqdm", "time", "open_clip", "hashlib", "dataclasses", "typing", "transformers", "torch", "torchvision"], "modelscope.models.multi_modal.ofa.configuration_mmspeech": ["transformers", "warnings"], "modelscope.models.multi_modal.ofa.resnet": ["torch"], "modelscope.models.multi_modal.ofa.utils.utils": ["typing", "torch"], "modelscope.models.multi_modal.ofa.utils.constant": [], "modelscope.models.multi_modal.ofa.generate.incremental_decoding_utils": ["typing", "torch", "uuid"], "modelscope.models.multi_modal.ofa.generate.sequence_generator": ["sys", "typing", "torch", "math"], "modelscope.models.multi_modal.ofa.generate.multihead_attention": ["typing", "fairseq", "torch", "math"], "modelscope.models.multi_modal.ofa.generate.utils": ["amp_C", 
"torch", "itertools", "torch_xla", "collections"], "modelscope.models.multi_modal.ofa.generate.token_generation_constraints": ["collections", "typing", "torch"], "modelscope.models.multi_modal.ofa.generate.search": ["typing", "torch", "math"], "modelscope.models.multi_modal.ofa.generate.ngram_repeat_block": ["typing", "torch", "math", "warnings", "fairseq"], "modelscope.models.multi_modal.ofa.vit": ["collections", "fairseq", "torch"], "modelscope.models.multi_modal.ofa.tokenization_ofa_fast": ["typing", "json", "transformers", "tokenizers"], "modelscope.models.multi_modal.ofa.configuration_ofa": ["transformers", "warnings"], "modelscope.models.multi_modal.ofa.modeling_ofa": ["apex", "math", "packaging", "random", "dataclasses", "typing", "transformers", "torch"], "modelscope.models.multi_modal.ofa.tokenization_ofa": ["collections", "typing", "transformers", "os"], "modelscope.models.multi_modal.ofa.modeling_mmspeech": ["numpy", "apex", "math", "packaging", "fairseq", "dataclasses", "typing", "transformers", "torch"], "modelscope.models.multi_modal.video_to_video.utils.schedules_sdedit": ["torch", "math"], "modelscope.models.multi_modal.video_to_video.utils.config": ["logging", "torch", "os", "easydict", "datetime"], "modelscope.models.multi_modal.video_to_video.utils.solvers_sdedit": ["tqdm", "torchsde", "torch"], "modelscope.models.multi_modal.video_to_video.utils.diffusion_sdedit": ["torch", "random"], "modelscope.models.multi_modal.video_to_video.utils.seed": ["numpy", "torch", "random"], "modelscope.models.multi_modal.video_to_video.utils.transforms": ["PIL", "numpy", "torch", "math", "random", "torchvision"], "modelscope.models.multi_modal.video_to_video.video_to_video_model": ["copy", "os", "random", "typing", "torch"], "modelscope.models.multi_modal.video_to_video.modules.embedder": ["numpy", "torch", "os", "open_clip", "torchvision"], "modelscope.models.multi_modal.video_to_video.modules.unet_v2v": ["einops", "math", "os", "torch", "xformers", "rotary_embedding_torch", "fairscale"], "modelscope.models.multi_modal.video_to_video.modules.autoencoder": ["collections", "numpy", "torch"], "modelscope.models.multi_modal.rleg.rleg": ["typing", "torch", "torchvision"], "modelscope.models.multi_modal.rleg.model": ["json", "torch", "os"], "modelscope.models.multi_modal.team.team_model": ["PIL", "numpy", "cv2", "tokenizers", "typing", "torch", "torchvision"], "modelscope.models.multi_modal.team.utils": ["numpy", "transformers", "typing", "torch", "collections"], "modelscope.models.multi_modal.clip.model": ["numpy", "os", "typing", "torch", "collections", "json"], "modelscope.models.multi_modal.clip.configuration_bert": ["logging", "__future__"], "modelscope.models.multi_modal.clip.modeling_bert": ["logging", "__future__", "math", "os", "sys", "torch", "io", "json"], "modelscope.models.multi_modal.clip.bert_tokenizer": ["__future__", "os", "re", "collections", "unicodedata", "six"], "modelscope.models.multi_modal.guided_diffusion.script": [], "modelscope.models.multi_modal.guided_diffusion.respace": ["numpy", "torch"], "modelscope.models.multi_modal.guided_diffusion.gaussian_diffusion": ["numpy", "torch", "enum", "math"], "modelscope.models.multi_modal.guided_diffusion.unet": ["numpy", "transformers", "torch", "math", "abc"], "modelscope.models.multi_modal.efficient_diffusion_tuning.efficient_stable_diffusion": ["os", "functools", "transformers", "typing", "torch", "diffusers"], "modelscope.models.base.base_torch_head": ["typing", "torch"], "modelscope.models.base.base_head": ["typing", 
"abc"], "modelscope.models.base.base_torch_model": ["copy", "os", "functools", "packaging", "typing", "torch"], "modelscope.models.base.base_model": ["typing", "abc", "os"], "modelscope.models.audio.kws.farfield.fsmn_sele_v3": ["torch"], "modelscope.models.audio.kws.farfield.fsmn": ["numpy", "torch"], "modelscope.models.audio.kws.farfield.model": ["typing", "tempfile", "os"], "modelscope.models.audio.kws.farfield.model_def": ["enum", "math", "struct"], "modelscope.models.audio.kws.farfield.fsmn_sele_v2": ["torch"], "modelscope.models.audio.kws.generic_key_word_spotting": ["typing", "os"], "modelscope.models.audio.kws.nearfield.cmvn": ["re", "numpy", "torch"], "modelscope.models.audio.kws.nearfield.fsmn": ["numpy", "torch", "typing"], "modelscope.models.audio.kws.nearfield.model": ["tempfile", "os", "sys", "typing", "torch"], "modelscope.models.audio.aec.network.loss": ["torch"], "modelscope.models.audio.aec.network.se_net": ["torch"], "modelscope.models.audio.aec.network.modulation_loss": ["torchaudio", "torch", "math"], "modelscope.models.audio.aec.layers.activations": ["torch"], "modelscope.models.audio.aec.layers.affine_transform": ["numpy", "torch"], "modelscope.models.audio.aec.layers.uni_deep_fsmn": ["numpy", "torch"], "modelscope.models.audio.aec.layers.layer_base": ["numpy", "torch", "abc", "re"], "modelscope.models.audio.aec.layers.deep_fsmn": ["numpy", "torch"], "modelscope.models.audio.ans.complex_nn": ["torch"], "modelscope.models.audio.ans.denoise_net": ["torch"], "modelscope.models.audio.ans.conv_stft": ["scipy", "numpy", "torch"], "modelscope.models.audio.ans.se_module_complex": ["torch"], "modelscope.models.audio.ans.frcrn": ["typing", "torch", "os"], "modelscope.models.audio.ans.layers.activations": ["torch"], "modelscope.models.audio.ans.layers.affine_transform": ["torch"], "modelscope.models.audio.ans.layers.uni_deep_fsmn": ["numpy", "torch"], "modelscope.models.audio.ans.layers.layer_base": ["numpy", "torch", "abc", "six"], "modelscope.models.audio.ans.unet": ["torch"], "modelscope.models.audio.asr.generic_automatic_speech_recognition": ["typing", "os"], "modelscope.models.audio.asr.wenet_automatic_speech_recognition": ["wenetruntime", "json", "typing", "os"], "modelscope.models.audio.tts.voice": ["numpy", "time", "os", "kantts", "threading", "yaml", "pickle", "torch", "collections", "json"], "modelscope.models.audio.tts.sambert_hifi": ["numpy", "__future__", "os", "shutil", "matplotlib", "yaml", "json", "wave", "zipfile", "datetime"], "modelscope.models.audio.itn.generic_inverse_text_processing": ["typing", "os"], "modelscope.models.audio.sv.ERes2Net_aug": ["math", "os", "torchaudio", "typing", "torch"], "modelscope.models.audio.sv.DTDNN_layers": ["torch"], "modelscope.models.audio.sv.speaker_diarization_dialogue_detection": ["torch"], "modelscope.models.audio.sv.lanuage_recognition_model": ["numpy", "os", "torchaudio", "typing", "torch"], "modelscope.models.audio.sv.ERes2Net": ["math", "os", "torchaudio", "typing", "torch"], "modelscope.models.audio.sv.ecapa_tdnn": ["numpy", "math", "os", "torchaudio", "typing", "torch"], "modelscope.models.audio.sv.pooling_layers": ["torch"], "modelscope.models.audio.sv.rdino": ["math", "os", "torchaudio", "typing", "torch"], "modelscope.models.audio.sv.speaker_diarization_semantic_speaker_turn_detection": ["torch"], "modelscope.models.audio.sv.speaker_change_locator": ["numpy", "os", "torchaudio", "typing", "torch", "collections"], "modelscope.models.audio.sv.fusion": ["torch"], "modelscope.models.audio.sv.DTDNN": ["numpy", "os", 
"torchaudio", "typing", "torch", "collections"], "modelscope.models.audio.sv.cluster_backend": ["numpy", "umap", "sklearn", "typing", "scipy", "hdbscan"], "modelscope.models.audio.sv.generic_speaker_verification": ["typing", "os"], "modelscope.models.audio.punc.generic_punctuation": ["typing", "os"], "modelscope.models.audio.separation.mossformer": ["typing", "copy", "torch", "os"], "modelscope.models.audio.separation.mossformer_block": ["torch"], "modelscope.models.audio.separation.layer_norm": ["__future__", "torch"], "modelscope.models.audio.separation.mossformer_conv_module": ["torch"], "modelscope.models.cv.face_generation.stylegan2": ["operator", "math", "functools", "random", "torch"], "modelscope.models.cv.face_generation.op.fused_act": ["torch", "os"], "modelscope.models.cv.face_generation.op.upfirdn2d": ["collections", "torch", "os"], "modelscope.models.cv.face_generation.op.conv2d_gradfix": ["torch", "contextlib", "warnings"], "modelscope.models.cv.image_depth_estimation.networks.swin_transformer": ["timm", "numpy", "torch"], "modelscope.models.cv.image_depth_estimation.networks.newcrf_depth": ["torch"], "modelscope.models.cv.image_depth_estimation.networks.uper_crf_head": ["mmcv", "torch"], "modelscope.models.cv.image_depth_estimation.networks.newcrf_utils": ["pkgutil", "importlib", "warnings", "torch", "os", "collections", "torchvision"], "modelscope.models.cv.image_depth_estimation.networks.newcrf_layers": ["timm", "numpy", "torch"], "modelscope.models.cv.image_depth_estimation.newcrfs_model": ["numpy", "torch", "os"], "modelscope.models.cv.image_semantic_segmentation.ddpm_seg.data_util": [], "modelscope.models.cv.image_semantic_segmentation.ddpm_seg.feature_extractors": ["typing", "torch"], "modelscope.models.cv.image_semantic_segmentation.ddpm_seg.utils": ["PIL", "numpy", "torch", "random"], "modelscope.models.cv.image_semantic_segmentation.ddpm_seg.pixel_classifier": ["PIL", "numpy", "os", "torch", "collections"], "modelscope.models.cv.image_semantic_segmentation.pan_merge.maskformer_semantic_head": ["mmdet", "torch"], "modelscope.models.cv.image_semantic_segmentation.pan_merge.base_panoptic_fusion_head": ["mmcv", "mmdet", "abc"], "modelscope.models.cv.image_semantic_segmentation.semantic_seg_model": ["numpy", "torch", "os"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.utils.data_process_func": ["mmcv", "mmdet"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.utils.builder": ["mmcv"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.utils.seg_func": ["torch", "warnings"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.segmentors.base_segmentor": ["numpy", "torch", "warnings", "mmcv", "collections", "abc"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.segmentors.encoder_decoder_mask2former": ["mmdet", "torch"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.decode_heads.mask2former_head_from_mmseg": ["mmcv", "copy", "mmdet", "torch"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.decode_heads.base_decode_head": ["mmcv", "mmdet", "abc", "torch"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.backbone.base.beit": ["timm", "mmdet", "torch", "math", "mmcv", "functools"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.backbone.beit_adapter": ["logging", "timm", "math", "mmdet", "torch"], "modelscope.models.cv.image_semantic_segmentation.vit_adapter.models.backbone.adapter_modules": ["logging", 
"timm", "mmdet", "torch", "functools"], "modelscope.models.cv.image_semantic_segmentation.ddpm_segmentation_model": ["ddpm_guided_diffusion", "typing", "torch", "os"], "modelscope.models.cv.image_quality_assessment_degradation.image_quality_assessment_degradation": ["typing", "torch", "os"], "modelscope.models.cv.image_quality_assessment_degradation.degradation_model": ["numpy", "torch", "cv2", "time", "collections", "json", "torchvision"], "modelscope.models.cv.image_panoptic_segmentation.panseg_model": ["torch", "os"], "modelscope.models.cv.image_quality_assessment_man.maniqa": ["timm", "torch", "einops"], "modelscope.models.cv.image_quality_assessment_man.swin": ["warnings", "einops", "itertools", "math", "torch", "collections"], "modelscope.models.cv.image_quality_assessment_man.image_quality_assessment_man": ["typing", "torch", "os"], "modelscope.models.cv.video_summarization.pgl_sum": ["torch", "math"], "modelscope.models.cv.video_summarization.summarizer": ["numpy", "torch", "typing", "os"], "modelscope.models.cv.video_summarization.kts.cpd_auto": ["numpy"], "modelscope.models.cv.video_summarization.kts.cpd_nonlin": ["numpy"], "modelscope.models.cv.video_summarization.base_model": ["numpy", "torch", "cv2"], "modelscope.models.cv.body_2d_keypoints.hrnet_basic_modules": ["torch"], "modelscope.models.cv.body_2d_keypoints.hrnet_v2": ["numpy", "torch", "os"], "modelscope.models.cv.body_2d_keypoints.w48": [], "modelscope.models.cv.image_defrcn_fewshot.utils.requirements_check": ["importlib_metadata", "importlib", "packaging", "sys", "collections"], "modelscope.models.cv.image_defrcn_fewshot.utils.register_data": [], "modelscope.models.cv.image_defrcn_fewshot.utils.model_surgery_op": ["argparse", "torch", "os"], "modelscope.models.cv.image_defrcn_fewshot.utils.coco_register": ["detectron2", "pycocotools", "fvcore", "io", "os", "contextlib"], "modelscope.models.cv.image_defrcn_fewshot.utils.configuration_mapper": ["detectron2"], "modelscope.models.cv.image_defrcn_fewshot.utils.voc_register": ["detectron2", "numpy", "fvcore", "xml", "os"], "modelscope.models.cv.image_defrcn_fewshot.models.gdl": ["torch"], "modelscope.models.cv.image_defrcn_fewshot.models.fast_rcnn": ["detectron2", "numpy", "torch", "fvcore"], "modelscope.models.cv.image_defrcn_fewshot.models.calibration_layer": ["detectron2", "sklearn", "torch", "cv2"], "modelscope.models.cv.image_defrcn_fewshot.models.defrcn": ["detectron2", "typing", "torch", "os"], "modelscope.models.cv.image_defrcn_fewshot.models.resnet": ["torch", "torchvision"], "modelscope.models.cv.image_defrcn_fewshot.models.roi_heads": ["detectron2", "torch"], "modelscope.models.cv.image_defrcn_fewshot.defrcn_for_fewshot": ["typing", "torch", "os"], "modelscope.models.cv.image_defrcn_fewshot.evaluation.evaluator": ["logging", "time", "detectron2", "torch", "datetime"], "modelscope.models.cv.image_defrcn_fewshot.evaluation.pascal_voc_evaluation": ["detectron2", "numpy", "tempfile", "os", "collections"], "modelscope.models.cv.image_defrcn_fewshot.evaluation.coco_evaluation": ["logging", "numpy", "copy", "pycocotools", "itertools", "os", "fvcore", "contextlib", "detectron2", "torch", "tabulate", "io", "collections", "json"], "modelscope.models.cv.object_detection_3d.depe.result_vis": ["mmdet3d", "numpy", "cv2", "os", "argparse", "pyquaternion", "pickle", "json"], "modelscope.models.cv.object_detection_3d.depe.depe_detect": ["numpy", "torch", "typing", "os"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.core.bbox.coders.nms_free_coder": ["mmdet", 
"torch"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.core.bbox.match_costs.match_cost": ["mmdet", "torch"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.core.bbox.assigners.hungarian_assigner_3d": ["scipy", "mmdet", "torch"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.core.bbox.util": ["mmdet3d", "numpy", "torch"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.detectors.petr3d": ["mmdet3d", "numpy", "mmdet", "torch", "mmcv"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.dense_heads.depth_net": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.dense_heads.petrv2_dednhead": ["mmdet3d", "numpy", "copy", "math", "mmdet", "torch", "mmcv"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.utils.positional_encoding": ["mmcv", "torch", "math"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.utils.petr_transformer": ["typing", "copy", "mmdet", "torch", "math", "warnings", "mmcv"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.backbones.vovnet": ["mmcv", "collections", "mmdet", "torch"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.models.necks.cp_fpn": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.datasets.pipelines.loading": ["mmcv", "numpy", "mmdet"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.datasets.pipelines.transform_3d": ["mmdet3d", "numpy", "copy", "PIL", "mmdet", "torch", "mmcv"], "modelscope.models.cv.object_detection_3d.depe.mmdet3d_plugin.datasets.nuscenes_dataset": ["mmdet3d", "numpy", "mmdet"], "modelscope.models.cv.animal_recognition.resnet": ["torch", "math"], "modelscope.models.cv.animal_recognition.splat": ["torch"], "modelscope.models.cv.image_face_fusion.network.ops": ["torch"], "modelscope.models.cv.image_face_fusion.network.aei_flow_net": ["torch"], "modelscope.models.cv.image_face_fusion.network.aad_layer": ["torch"], "modelscope.models.cv.image_face_fusion.network.dense_motion": ["torch"], "modelscope.models.cv.image_face_fusion.network.model_irse": ["collections", "torch"], "modelscope.models.cv.image_face_fusion.network.bfm": ["scipy", "numpy", "torch", "os"], "modelscope.models.cv.image_face_fusion.network.facerecon_model": ["typing", "torch", "os"], "modelscope.models.cv.image_face_fusion.image_face_fusion": ["PIL", "numpy", "cv2", "os", "typing", "torch", "collections", "torchvision"], "modelscope.models.cv.image_face_fusion.facegan.face_gan": ["numpy", "torch", "os"], "modelscope.models.cv.image_face_fusion.facegan.op.fused_act": ["torch"], "modelscope.models.cv.image_face_fusion.facegan.op.upfirdn2d": ["collections", "torch"], "modelscope.models.cv.image_face_fusion.facegan.op.conv2d_gradfix": ["torch", "contextlib", "warnings"], "modelscope.models.cv.image_face_fusion.facegan.gpen_model": ["operator", "itertools", "math", "functools", "random", "torch"], "modelscope.models.cv.image_face_fusion.facelib.align_trans": ["numpy", "cv2"], "modelscope.models.cv.image_face_fusion.facelib.matlab_cp2tform": ["numpy"], "modelscope.models.cv.image_deblur.nafnet_for_image_deblur": ["typing", "torch", "os"], "modelscope.models.cv.text_to_360panorama_image.pipeline_base": ["diffusers", "packaging", "re", "typing", "transformers", "torch", "warnings", "inspect"], "modelscope.models.cv.text_to_360panorama_image.pipeline_sr": ["PIL", "numpy", "copy", "diffusers", "os", "re", "typing", 
"transformers", "torch", "warnings", "inspect"], "modelscope.models.cv.image_matching.quadtree_attention_model": ["numpy", "cv2", "os", "pathlib", "torch"], "modelscope.models.cv.image_matching.utils.misc": ["yacs"], "modelscope.models.cv.image_matching.loftr_quadtree.loftr_module.linear_attention": ["torch"], "modelscope.models.cv.image_matching.loftr_quadtree.loftr_module.quadtree_attention": ["timm", "torch"], "modelscope.models.cv.image_matching.loftr_quadtree.loftr_module.fine_preprocess": ["torch", "einops"], "modelscope.models.cv.image_matching.loftr_quadtree.loftr_module.transformer": ["timm", "copy", "einops", "math", "torch"], "modelscope.models.cv.image_matching.loftr_quadtree.utils.fine_matching": ["torch", "math", "kornia"], "modelscope.models.cv.image_matching.loftr_quadtree.utils.position_encoding": ["torch", "math"], "modelscope.models.cv.image_matching.loftr_quadtree.utils.coarse_matching": ["torch", "einops"], "modelscope.models.cv.image_matching.loftr_quadtree.loftr": ["torch", "einops"], "modelscope.models.cv.image_matching.loftr_quadtree.backbone.resnet_fpn": ["torch"], "modelscope.models.cv.image_matching.config.default": ["yacs"], "modelscope.models.cv.video_super_resolution.real_basicvsr_for_video_super_resolution": ["typing", "torch", "os"], "modelscope.models.cv.video_super_resolution.msrresnet_lite_model": ["functools", "torch", "typing", "os"], "modelscope.models.cv.video_super_resolution.real_basicvsr_net": ["torch"], "modelscope.models.cv.video_super_resolution.basicvsr_net": ["torch"], "modelscope.models.cv.video_super_resolution.common": ["torch"], "modelscope.models.cv.image_classification.mmcls_model": ["os"], "modelscope.models.cv.image_classification.backbones.beit_v2": ["einops", "math", "itertools", "os", "functools", "mmcls", "typing", "torch", "warnings", "mmcv", "collections"], "modelscope.models.cv.image_classification.backbones.nextvit": ["einops", "math", "itertools", "os", "functools", "mmcls", "typing", "torch", "warnings", "mmcv", "collections"], "modelscope.models.cv.image_classification.utils": ["numpy", "torch", "math", "os", "itertools", "collections", "mmcls"], "modelscope.models.cv.image_classification.resnet50_cc": ["math", "os", "torch", "collections", "torchvision"], "modelscope.models.cv.face_reconstruction.utils": ["PIL", "numpy", "cv2", "math", "argparse", "os", "numba", "torch", "array", "scipy"], "modelscope.models.cv.face_reconstruction.models.facelandmark.large_base_lmks_infer": ["numpy", "torch"], "modelscope.models.cv.face_reconstruction.models.facelandmark.nets.large_eyeball_net": ["torch"], "modelscope.models.cv.face_reconstruction.models.facelandmark.nets.large_base_lmks_net": ["torch"], "modelscope.models.cv.face_reconstruction.models.losses": ["numpy", "torch", "kornia"], "modelscope.models.cv.face_reconstruction.models.networks": ["typing", "torch", "kornia", "os"], "modelscope.models.cv.face_reconstruction.models.nv_diffrast": ["numpy", "typing", "torch", "warnings", "nvdiffrast"], "modelscope.models.cv.face_reconstruction.models.renderer": ["numpy", "torch", "imageio", "skimage"], "modelscope.models.cv.face_reconstruction.models.bfm": ["scipy", "numpy", "torch", "os"], "modelscope.models.cv.face_reconstruction.models.de_retouching_module": ["torch"], "modelscope.models.cv.face_reconstruction.models.facerecon_model": ["numpy", "cv2", "os", "torch", "collections"], "modelscope.models.cv.face_reconstruction.models.pix2pix.pix2pix_options": [], "modelscope.models.cv.face_reconstruction.models.pix2pix.networks": 
["functools", "torch"], "modelscope.models.cv.face_reconstruction.models.pix2pix.pix2pix_model": ["torch"], "modelscope.models.cv.face_reconstruction.models.opt": [], "modelscope.models.cv.face_reconstruction.models.unet": ["torch", "warnings"], "modelscope.models.cv.image_to_image_generation.model": ["torch", "math"], "modelscope.models.cv.image_to_image_generation.ops.losses": ["torch", "math"], "modelscope.models.cv.image_to_image_generation.ops.diffusion": ["torch", "math"], "modelscope.models.cv.image_to_image_generation.models.clip": ["torch", "math"], "modelscope.models.cv.image_to_image_generation.models.autoencoder": ["torch", "math"], "modelscope.models.cv.image_to_image_generation.data.transforms": ["PIL", "torchvision", "math", "random"], "modelscope.models.cv.text_driven_segmentation.lseg_vit": ["timm", "torch", "math", "types"], "modelscope.models.cv.text_driven_segmentation.lseg_blocks": ["torch"], "modelscope.models.cv.text_driven_segmentation.simple_tokenizer": ["html", "regex", "os", "functools", "gzip", "ftfy"], "modelscope.models.cv.text_driven_segmentation.model": ["collections", "numpy", "torch", "typing"], "modelscope.models.cv.text_driven_segmentation.lseg_model": ["PIL", "numpy", "os", "typing", "torch", "json"], "modelscope.models.cv.text_driven_segmentation.lseg_base": ["torch"], "modelscope.models.cv.text_driven_segmentation.lseg_net": ["numpy", "torch"], "modelscope.models.cv.text_driven_segmentation.clip": ["PIL", "tqdm", "urllib", "os", "hashlib", "typing", "torch", "warnings", "pkg_resources", "torchvision"], "modelscope.models.cv.cartoon.loss": ["numpy", "skimage", "os", "joblib", "tensorflow", "scipy"], "modelscope.models.cv.cartoon.model_tf": ["typing", "tensorflow"], "modelscope.models.cv.cartoon.utils": ["numpy", "cv2", "os", "tensorflow", "random"], "modelscope.models.cv.cartoon.network": ["tensorflow"], "modelscope.models.cv.cartoon.mtcnn_pytorch.src.align_trans": ["numpy", "cv2"], "modelscope.models.cv.cartoon.mtcnn_pytorch.src.matlab_cp2tform": ["numpy"], "modelscope.models.cv.cartoon.facelib.config": ["numpy", "easydict", "os"], "modelscope.models.cv.cartoon.facelib.LK.lk": ["numpy"], "modelscope.models.cv.cartoon.facelib.face_detector": ["numpy", "tensorflow", "cv2", "time"], "modelscope.models.cv.cartoon.facelib.face_landmark": ["numpy", "cv2", "tensorflow"], "modelscope.models.cv.cartoon.facelib.facer": ["numpy", "cv2", "time"], "modelscope.models.cv.hand_static.hand_model": ["PIL", "numpy", "cv2", "os", "sys", "torch", "torchvision"], "modelscope.models.cv.hand_static.networks": ["os", "torch", "torchvision"], "modelscope.models.cv.movie_scene_segmentation.get_model": [], "modelscope.models.cv.movie_scene_segmentation.model": ["PIL", "numpy", "tqdm", "einops", "math", "os", "typing", "torch", "shotdetect_scenedetect_lgss", "torchvision"], "modelscope.models.cv.movie_scene_segmentation.utils.head": ["torch"], "modelscope.models.cv.movie_scene_segmentation.utils.save_op": ["tqdm", "numpy", "cv2", "os", "subprocess"], "modelscope.models.cv.movie_scene_segmentation.utils.trn": ["transformers", "torch"], "modelscope.models.cv.movie_scene_segmentation.utils.shot_encoder": ["typing", "torch"], "modelscope.models.cv.image_skychange.skychange_model": ["pdb", "math", "os", "cv2", "time", "typing", "torch", "collections", "json"], "modelscope.models.cv.image_skychange.ptsemseg.hrnet_super_and_ocr": ["numpy", "torch", "__future__"], "modelscope.models.cv.image_skychange.ptsemseg.hrnet_backnone": ["logging", "numpy", "torch", "os"], 
"modelscope.models.cv.image_skychange.ptsemseg.BlockModules": ["torch"], "modelscope.models.cv.image_skychange.ptsemseg.unet": ["torch"], "modelscope.models.cv.image_skychange.preprocessor": ["numpy", "pdb", "cv2", "typing", "torch", "numbers", "json", "torchvision"], "modelscope.models.cv.image_skychange.skychange": ["PIL", "numpy", "pdb", "cv2", "os", "torch", "numbers", "collections", "json", "torchvision"], "modelscope.models.cv.image_inpainting.refinement": ["tqdm", "numpy", "cv2", "kornia", "torch"], "modelscope.models.cv.image_inpainting.model": ["typing", "torch", "os"], "modelscope.models.cv.image_inpainting.default": ["bisect", "torch"], "modelscope.models.cv.image_inpainting.modules.inception": ["torch", "torchvision"], "modelscope.models.cv.image_inpainting.modules.pix2pixhd": ["logging", "numpy", "torch", "collections", "functools"], "modelscope.models.cv.image_inpainting.modules.ffc": ["numpy", "torch", "kornia"], "modelscope.models.cv.image_inpainting.modules.feature_matching": ["typing", "torch"], "modelscope.models.cv.image_inpainting.modules.perceptual": ["torch", "torchvision"], "modelscope.models.cv.image_inpainting.modules.adversarial": ["typing", "torch"], "modelscope.models.cv.image_inpainting.modules.ade20k.resnet": ["torch", "math", "os"], "modelscope.models.cv.image_inpainting.modules.ade20k.base": ["torch", "os"], "modelscope.models.cv.image_inpainting.base": ["typing", "torch"], "modelscope.models.cv.nerf_recon_acc.nerf_recon_acc": ["tqdm", "numpy", "cv2", "os", "time", "glob", "torch"], "modelscope.models.cv.nerf_recon_acc.network.nerf": ["tinycudann", "numpy", "torch", "nerfacc"], "modelscope.models.cv.nerf_recon_acc.network.utils": ["tinycudann", "numpy", "torch", "gc", "collections", "mcubes"], "modelscope.models.cv.nerf_recon_acc.network.segmenter": ["numpy", "tensorflow"], "modelscope.models.cv.nerf_recon_acc.nerf_preprocess": ["numpy", "cv2", "os", "subprocess", "tensorflow", "typing", "glob"], "modelscope.models.cv.nerf_recon_acc.dataloader.nerf_dataset": ["PIL", "numpy", "math", "os", "torch", "json", "torchvision"], "modelscope.models.cv.nerf_recon_acc.dataloader.read_write_model": ["numpy", "os", "argparse", "collections", "struct"], "modelscope.models.cv.nerf_recon_4k.nerf_recon_4k": ["tqdm", "numpy", "os", "argparse", "time", "imageio", "random", "torch", "mmcv"], "modelscope.models.cv.nerf_recon_4k.network.utils": ["tinycudann", "numpy", "torch", "gc", "collections", "mcubes"], "modelscope.models.cv.nerf_recon_4k.network.dvgo": ["torch_scatter", "numpy", "copy", "math", "time", "os", "functools", "torch"], "modelscope.models.cv.nerf_recon_4k.nerf_preprocess": ["numpy", "cv2", "os", "subprocess", "tensorflow", "typing", "glob"], "modelscope.models.cv.nerf_recon_4k.dataloader.load_data": ["numpy"], "modelscope.models.cv.nerf_recon_4k.dataloader.load_llff": ["numpy", "torch", "os", "scipy", "imageio"], "modelscope.models.cv.nerf_recon_4k.dataloader.load_tankstemple": ["numpy", "imageio", "glob", "os"], "modelscope.models.cv.nerf_recon_4k.dataloader.read_write_model": ["numpy", "os", "argparse", "collections", "struct"], "modelscope.models.cv.nerf_recon_4k.dataloader.load_blender": ["numpy", "torch", "cv2", "os", "imageio", "json"], "modelscope.models.cv.open_vocabulary_detection_vild.vild": ["numpy", "os", "tensorflow", "clip", "typing", "torch", "scipy"], "modelscope.models.cv.pointcloud_sceneflow_estimation.rcp_model": ["numpy", "torch", "os"], "modelscope.models.cv.pointcloud_sceneflow_estimation.pointnet2_utils": ["typing", "torch", 
"pointnet2_cuda"], "modelscope.models.cv.pointcloud_sceneflow_estimation.sf_rcp": ["torch"], "modelscope.models.cv.pointcloud_sceneflow_estimation.common": ["torch", "math"], "modelscope.models.cv.vop_retrieval.model": ["torch", "os"], "modelscope.models.cv.vop_retrieval.basic_utils": ["PIL", "numpy", "cv2", "os", "shutil", "random", "pickle", "torch", "collections", "zipfile", "ujson", "torchvision"], "modelscope.models.cv.vop_retrieval.tokenization_clip": ["html", "regex", "torch", "os", "functools", "gzip", "ftfy"], "modelscope.models.cv.vop_retrieval.backbone": ["tqdm", "numpy", "urllib", "os", "hashlib", "typing", "torch", "warnings", "collections"], "modelscope.models.cv.vop_retrieval.model_se": ["torch", "os"], "modelscope.models.cv.stream_yolo.utils.boxes": ["torch", "torchvision"], "modelscope.models.cv.stream_yolo.utils.format": ["math"], "modelscope.models.cv.stream_yolo.models.tal_head": ["torch"], "modelscope.models.cv.stream_yolo.models.streamyolo": ["torch"], "modelscope.models.cv.stream_yolo.models.darknet": ["torch"], "modelscope.models.cv.stream_yolo.models.network_blocks": ["torch"], "modelscope.models.cv.stream_yolo.models.dfp_pafpn": ["torch"], "modelscope.models.cv.stream_yolo.realtime_video_detector": ["logging", "tqdm", "numpy", "cv2", "time", "argparse", "os", "torch", "json"], "modelscope.models.cv.stream_yolo.data.data_augment": ["numpy", "math", "cv2", "random"], "modelscope.models.cv.stream_yolo.exp.build": ["sys", "os"], "modelscope.models.cv.stream_yolo.exp.default.streamyolo": ["sys", "torch", "os"], "modelscope.models.cv.stream_yolo.exp.base_exp": ["abc", "torch"], "modelscope.models.cv.stream_yolo.exp.yolox_base": ["torch", "random", "os"], "modelscope.models.cv.face_attribute_recognition.fair_face.face_attribute_recognition": ["PIL", "numpy", "cv2", "os", "torch", "torchvision"], "modelscope.models.cv.video_depth_estimation.geometry.camera": ["functools", "torch"], "modelscope.models.cv.video_depth_estimation.geometry.pose_utils": ["numpy", "torch"], "modelscope.models.cv.video_depth_estimation.geometry.camera_utils": ["torch"], "modelscope.models.cv.video_depth_estimation.geometry.pose": ["torch"], "modelscope.models.cv.video_depth_estimation.configs.default_config": ["yacs", "os"], "modelscope.models.cv.video_depth_estimation.networks.depth_pose.depth_pose_net": ["functools", "torch"], "modelscope.models.cv.video_depth_estimation.networks.optim.update": ["torch"], "modelscope.models.cv.video_depth_estimation.networks.optim.extractor": ["torch", "torchvision"], "modelscope.models.cv.video_depth_estimation.networks.layers.resnet.resnet_encoder": ["numpy", "__future__", "torch", "torchvision"], "modelscope.models.cv.video_depth_estimation.networks.layers.resnet.pose_decoder": ["collections", "__future__", "torch"], "modelscope.models.cv.video_depth_estimation.networks.layers.resnet.depth_decoder": ["collections", "numpy", "torch", "__future__"], "modelscope.models.cv.video_depth_estimation.networks.layers.resnet.layers": ["__future__", "torch"], "modelscope.models.cv.video_depth_estimation.utils.image": ["PIL", "numpy", "cv2", "os", "functools", "torch"], "modelscope.models.cv.video_depth_estimation.utils.types": ["yacs", "numpy", "torch"], "modelscope.models.cv.video_depth_estimation.utils.config": ["yacs", "torch", "datetime", "os"], "modelscope.models.cv.video_depth_estimation.utils.image_gt": ["PIL", "functools", "torch", "cv2"], "modelscope.models.cv.video_depth_estimation.utils.misc": ["termcolor"], 
"modelscope.models.cv.video_depth_estimation.utils.augmentations": ["PIL", "numpy", "cv2", "random", "torchvision"], "modelscope.models.cv.video_depth_estimation.utils.load": ["logging", "importlib", "os", "torch", "warnings", "collections", "inspect"], "modelscope.models.cv.video_depth_estimation.utils.depth": ["numpy", "matplotlib", "torch", "torchvision"], "modelscope.models.cv.video_depth_estimation.utils.horovod": ["horovod"], "modelscope.models.cv.video_depth_estimation.models.model_utils": [], "modelscope.models.cv.video_depth_estimation.models.sfm_model_mf": ["torch", "random"], "modelscope.models.cv.video_depth_estimation.models.model_wrapper": ["importlib", "numpy", "random", "torch", "collections"], "modelscope.models.cv.video_depth_estimation.models.model_checkpoint": ["numpy", "torch", "re", "os"], "modelscope.models.cv.video_depth_estimation.models.sup_model_mf": [], "modelscope.models.cv.video_depth_estimation.dro_model": ["tqdm", "numpy", "cv2", "os", "glob", "torch"], "modelscope.models.cv.indoor_layout_estimation.panovit": ["yacs", "numpy", "torch", "os"], "modelscope.models.cv.indoor_layout_estimation.networks.panovit": ["numpy", "torch"], "modelscope.models.cv.indoor_layout_estimation.networks.modality.layout": ["numpy", "math", "torch", "shapely", "scipy"], "modelscope.models.cv.indoor_layout_estimation.networks.utils": ["numpy", "torch"], "modelscope.models.cv.indoor_layout_estimation.networks.backbone.vit_horizon_pry_image": ["timm", "numpy", "torch"], "modelscope.models.cv.indoor_layout_estimation.networks.backbone.resnet_DA": ["torch", "torchvision"], "modelscope.models.cv.indoor_layout_estimation.networks.misc.panostretch": ["scipy", "numpy", "functools"], "modelscope.models.cv.indoor_layout_estimation.networks.misc.post_proc": ["scipy", "numpy", "sklearn"], "modelscope.models.cv.indoor_layout_estimation.networks.misc.fourier": ["scipy", "numpy", "PIL"], "modelscope.models.cv.video_streaming_perception.longshortnet.longshortnet": ["logging", "tqdm", "numpy", "cv2", "time", "argparse", "os", "torch", "json"], "modelscope.models.cv.video_streaming_perception.longshortnet.models.longshort": ["torch"], "modelscope.models.cv.video_streaming_perception.longshortnet.models.dfp_pafpn_short": ["collections", "torch"], "modelscope.models.cv.video_streaming_perception.longshortnet.models.longshort_backbone_neck": ["torch"], "modelscope.models.cv.video_streaming_perception.longshortnet.models.dfp_pafpn_long": ["collections", "torch"], "modelscope.models.cv.video_streaming_perception.longshortnet.exp.longshortnet_base": [], "modelscope.models.cv.video_instance_segmentation.track.mask_hungarian_assigner": ["scipy", "numpy", "mmdet", "torch"], "modelscope.models.cv.video_instance_segmentation.track.kernel_update_head": ["mmcv", "numpy", "mmdet", "torch"], "modelscope.models.cv.video_instance_segmentation.utils": ["numpy", "mmdet", "torch"], "modelscope.models.cv.video_instance_segmentation.neck.msdeformattn_decoder": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.video_instance_segmentation.head.kernel_updator": ["mmcv", "torch"], "modelscope.models.cv.video_instance_segmentation.head.kernel_iter_head": ["mmdet", "torch"], "modelscope.models.cv.video_instance_segmentation.head.kernel_head": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.video_instance_segmentation.head.kernel_update_head": ["mmcv", "numpy", "mmdet", "torch"], "modelscope.models.cv.video_instance_segmentation.head.kernel_frame_iter_head": ["mmcv", "mmdet", "torch"], 
"modelscope.models.cv.video_instance_segmentation.video_knet": ["mmdet", "torch"], "modelscope.models.cv.image_color_enhance.csrnet": ["functools", "torch", "math"], "modelscope.models.cv.image_color_enhance.image_color_enhance": ["typing", "torch", "os"], "modelscope.models.cv.image_color_enhance.deeplpf.deeplpf_image_color_enhance": ["typing", "torch", "os"], "modelscope.models.cv.image_color_enhance.deeplpf.deeplpfnet": ["matplotlib", "torch", "math"], "modelscope.models.cv.image_color_enhance.adaint.adaint": ["os", "typing", "torch", "numbers", "torchvision"], "modelscope.models.cv.image_binary_quant_classification.binary_quant_model": ["collections", "torch", "os"], "modelscope.models.cv.image_binary_quant_classification.bnext": ["numpy", "torch"], "modelscope.models.cv.motion_generation.model": [], "modelscope.models.cv.motion_generation.modules.mdm": ["numpy", "torch", "clip"], "modelscope.models.cv.motion_generation.modules.smpl": ["numpy", "contextlib", "torch", "os", "smplx"], "modelscope.models.cv.motion_generation.modules.respace": ["numpy", "torch"], "modelscope.models.cv.motion_generation.modules.cfg_sampler": ["copy", "torch"], "modelscope.models.cv.motion_generation.modules.gaussian_diffusion": ["numpy", "copy", "torch", "math", "enum"], "modelscope.models.cv.motion_generation.modules.rotation2xyz": ["torch"], "modelscope.models.cv.referring_video_object_segmentation.model": ["typing", "torch", "os"], "modelscope.models.cv.referring_video_object_segmentation.utils.criterion": ["torch"], "modelscope.models.cv.referring_video_object_segmentation.utils.mttr": ["torch", "einops"], "modelscope.models.cv.referring_video_object_segmentation.utils.swin_transformer": ["timm", "numpy", "operator", "einops", "torch", "functools"], "modelscope.models.cv.referring_video_object_segmentation.utils.postprocessing": ["numpy", "pycocotools", "torch", "einops"], "modelscope.models.cv.referring_video_object_segmentation.utils.position_encoding_2d": ["torch", "math"], "modelscope.models.cv.referring_video_object_segmentation.utils.matcher": ["scipy", "torch"], "modelscope.models.cv.referring_video_object_segmentation.utils.backbone": ["torch", "einops", "torchvision"], "modelscope.models.cv.referring_video_object_segmentation.utils.multimodal_transformer": ["copy", "einops", "os", "typing", "transformers", "torch"], "modelscope.models.cv.referring_video_object_segmentation.utils.misc": ["typing", "torch", "pickle", "torchvision"], "modelscope.models.cv.referring_video_object_segmentation.utils.segmentation": ["typing", "torch"], "modelscope.models.cv.image_to_image_translation.ops.svd": ["torch"], "modelscope.models.cv.image_to_image_translation.ops.random_mask": ["numpy", "cv2"], "modelscope.models.cv.image_to_image_translation.ops.losses": ["torch", "math"], "modelscope.models.cv.image_to_image_translation.ops.diffusion": ["torch", "math"], "modelscope.models.cv.image_to_image_translation.ops.utils": ["PIL", "numpy", "math", "os", "cv2", "multiprocessing", "hashlib", "io", "torch", "json", "binascii", "zipfile", "base64"], "modelscope.models.cv.image_to_image_translation.ops.metrics": ["scipy", "numpy", "torch"], "modelscope.models.cv.image_to_image_translation.ops.random_color": ["random", "colorsys"], "modelscope.models.cv.image_to_image_translation.ops.degradation": ["numpy", "torch", "cv2", "math", "scipy", "os", "random"], "modelscope.models.cv.image_to_image_translation.ops.apps": ["PIL", "numpy", "os", "torch", "artist", "torchvision"], 
"modelscope.models.cv.image_to_image_translation.model_translation": ["torch", "math"], "modelscope.models.cv.image_to_image_translation.models.clip": ["torch", "math"], "modelscope.models.cv.image_to_image_translation.models.autoencoder": ["torch", "math"], "modelscope.models.cv.image_to_image_translation.data.transforms": ["PIL", "torchvision", "math", "random"], "modelscope.models.cv.nerf_recon_vq_compression.network.tensoRF": [], "modelscope.models.cv.nerf_recon_vq_compression.network.weighted_vq": ["torch", "einops", "contextlib"], "modelscope.models.cv.nerf_recon_vq_compression.network.tensorBase": ["numpy", "torch", "time"], "modelscope.models.cv.nerf_recon_vq_compression.network.tensoRF_VQ": ["tqdm", "os", "random", "typing", "torch"], "modelscope.models.cv.nerf_recon_vq_compression.utils": ["PIL", "numpy", "cv2", "skimage", "plyfile", "torch", "scipy", "torchvision"], "modelscope.models.cv.nerf_recon_vq_compression.dataloader.tankstemple": ["tqdm", "PIL", "os", "torch", "torchvision"], "modelscope.models.cv.nerf_recon_vq_compression.dataloader.ray_utils": ["numpy", "torch", "re", "kornia"], "modelscope.models.cv.nerf_recon_vq_compression.dataloader.llff": ["PIL", "numpy", "os", "glob", "torch", "torchvision"], "modelscope.models.cv.nerf_recon_vq_compression.dataloader.blender": ["PIL", "numpy", "tqdm", "cv2", "os", "torch", "json", "torchvision"], "modelscope.models.cv.nerf_recon_vq_compression.dataloader.nsvf": ["tqdm", "PIL", "os", "torch", "torchvision"], "modelscope.models.cv.nerf_recon_vq_compression.renderer": ["tqdm", "numpy", "os", "imageio", "sys", "torch"], "modelscope.models.cv.nerf_recon_vq_compression.nerf_recon_vq_compression": ["tqdm", "numpy", "cv2", "os", "time", "functools", "glob", "torch"], "modelscope.models.cv.bad_image_detecting.bad_image_detecting": ["numpy", "os", "typing", "torch", "torchvision"], "modelscope.models.cv.image_paintbyexample.model": ["omegaconf", "os", "paint_ldm", "typing", "torch"], "modelscope.models.cv.video_object_segmentation.mod_resnet": ["collections", "torch", "math"], "modelscope.models.cv.video_object_segmentation.model": ["typing", "torch", "os"], "modelscope.models.cv.video_object_segmentation.inference_memory_bank": ["torch", "math"], "modelscope.models.cv.video_object_segmentation.inference_core": ["torch"], "modelscope.models.cv.video_object_segmentation.aggregate": ["torch"], "modelscope.models.cv.video_object_segmentation.network": ["torch", "math"], "modelscope.models.cv.video_object_segmentation.cbam": ["torch"], "modelscope.models.cv.video_object_segmentation.modules": ["torch", "torchvision"], "modelscope.models.cv.video_object_segmentation.eval_network": ["torch"], "modelscope.models.cv.ocr_recognition.model": ["torch", "os"], "modelscope.models.cv.ocr_recognition.preprocessor": ["PIL", "numpy", "cv2", "os", "torch"], "modelscope.models.cv.ocr_recognition.modules.LightweightEdge.main_model": ["collections", "torch"], "modelscope.models.cv.ocr_recognition.modules.LightweightEdge.nas_block.mix_ops": ["numpy", "torch"], "modelscope.models.cv.ocr_recognition.modules.LightweightEdge.nas_block.proxyless": ["numpy", "re", "sys", "torch", "queue"], "modelscope.models.cv.ocr_recognition.modules.LightweightEdge.nas_block.layers": ["collections", "numpy", "torch"], "modelscope.models.cv.ocr_recognition.modules.CRNN.main_model": ["torch"], "modelscope.models.cv.ocr_recognition.modules.ConvNextViT.vitstr": ["logging", "__future__", "copy", "functools", "torch"], 
"modelscope.models.cv.ocr_recognition.modules.ConvNextViT.main_model": ["torch"], "modelscope.models.cv.ocr_recognition.modules.ConvNextViT.timm_tinyc": ["logging", "copy", "torch", "math", "itertools", "collections", "functools"], "modelscope.models.cv.ocr_recognition.modules.ConvNextViT.convnext": ["torch"], "modelscope.models.cv.face_human_hand_detection.shufflenetv2": ["torch"], "modelscope.models.cv.face_human_hand_detection.one_stage_detector": ["torch"], "modelscope.models.cv.face_human_hand_detection.ghost_pan": ["torch", "math"], "modelscope.models.cv.face_human_hand_detection.det_infer": ["numpy", "torch", "cv2"], "modelscope.models.cv.face_human_hand_detection.utils": ["torch"], "modelscope.models.cv.face_human_hand_detection.nanodet_plus_head": ["numpy", "math", "cv2", "torch", "torchvision"], "modelscope.models.cv.image_restoration.image_restoration_model": ["numpy", "torch", "cv2", "os"], "modelscope.models.cv.image_restoration.demoire_models.nets": ["torch"], "modelscope.models.cv.video_human_matting.model": ["numpy", "os", "typing", "torch", "torchvision"], "modelscope.models.cv.video_human_matting.models.effv2": ["torch"], "modelscope.models.cv.video_human_matting.models.deep_guided_filter": ["torch"], "modelscope.models.cv.video_human_matting.models.lraspp": ["torch"], "modelscope.models.cv.video_human_matting.models.matting": ["typing", "torch"], "modelscope.models.cv.video_human_matting.models.decoder": ["typing", "torch"], "modelscope.models.cv.cmdssl_video_embedding.resnet2p1d": ["torch"], "modelscope.models.cv.cmdssl_video_embedding.c3d": ["torch"], "modelscope.models.cv.cmdssl_video_embedding.resnet3d": ["torch"], "modelscope.models.cv.object_detection.mmdet_model": ["numpy", "torch", "os"], "modelscope.models.cv.object_detection.mmdet_ms.roi_heads.mask_heads.fcn_mask_head": ["numpy", "mmdet", "torch", "warnings", "mmcv"], "modelscope.models.cv.object_detection.mmdet_ms.roi_heads.bbox_heads.convfc_bbox_head": ["mmdet", "torch"], "modelscope.models.cv.object_detection.mmdet_ms.dense_heads.rpn_head": ["mmcv", "copy", "mmdet", "torch"], "modelscope.models.cv.object_detection.mmdet_ms.dense_heads.anchor_head": ["mmdet"], "modelscope.models.cv.object_detection.mmdet_ms.utils.checkpoint": ["importlib", "mmcv", "tempfile", "time", "os", "pkgutil", "torch", "warnings", "io", "collections", "torchvision"], "modelscope.models.cv.object_detection.mmdet_ms.utils.convModule_norm": ["mmcv"], "modelscope.models.cv.object_detection.mmdet_ms.backbones.vit": ["timm", "math", "functools", "mmdet", "torch"], "modelscope.models.cv.object_detection.mmdet_ms.necks.fpn": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.vision_middleware.vim": ["torch", "einops", "math"], "modelscope.models.cv.vision_middleware.head": ["mmcv", "numpy", "abc", "torch"], "modelscope.models.cv.vision_middleware.model": ["typing", "json", "torch", "os"], "modelscope.models.cv.vision_middleware.backbone": ["numpy", "math", "os", "typing", "torch", "collections"], "modelscope.models.cv.controllable_image_generation.annotator.mlsd.mbv2_mlsd_large": ["sys", "torch", "os"], "modelscope.models.cv.controllable_image_generation.annotator.mlsd.utils": ["numpy", "torch", "cv2", "os"], "modelscope.models.cv.controllable_image_generation.annotator.annotator": ["numpy", "einops", "cv2", "os", "mmseg", "torch", "mmcv"], "modelscope.models.cv.controllable_image_generation.annotator.openpose.body": ["numpy", "cv2", "math", "time", "matplotlib", "torch", "scipy", "torchvision"], 
"modelscope.models.cv.controllable_image_generation.annotator.openpose.model": ["collections", "torch"], "modelscope.models.cv.controllable_image_generation.annotator.openpose.util": ["numpy", "matplotlib", "cv2", "math"], "modelscope.models.cv.controllable_image_generation.annotator.openpose.hand": ["numpy", "cv2", "math", "time", "skimage", "matplotlib", "torch", "scipy", "json"], "modelscope.models.cv.controllable_image_generation.annotator.midas.api": ["torchvision", "torch", "cv2", "os"], "modelscope.models.cv.controllable_image_generation.annotator.midas.utils": ["numpy", "sys", "torch", "cv2", "re"], "modelscope.models.cv.controllable_image_generation.annotator.midas.midas.dpt_depth": ["torch"], "modelscope.models.cv.controllable_image_generation.annotator.midas.midas.blocks": ["torch"], "modelscope.models.cv.controllable_image_generation.annotator.midas.midas.midas_net_custom": ["torch"], "modelscope.models.cv.controllable_image_generation.annotator.midas.midas.midas_net": ["torch"], "modelscope.models.cv.controllable_image_generation.annotator.midas.midas.vit": ["timm", "torch", "math", "types"], "modelscope.models.cv.controllable_image_generation.annotator.midas.midas.transforms": ["numpy", "math", "cv2"], "modelscope.models.cv.controllable_image_generation.annotator.midas.midas.base_model": ["torch"], "modelscope.models.cv.controllable_image_generation.controlnet": ["control_ldm", "numpy", "PIL", "tempfile", "math", "os", "cv2", "einops", "random", "sys", "typing", "torch"], "modelscope.models.cv.image_denoise.nafnet.arch_util": ["torch"], "modelscope.models.cv.image_denoise.nafnet.NAFNet_arch": ["numpy", "torch"], "modelscope.models.cv.image_denoise.nafnet_for_image_denoise": ["typing", "torch", "os"], "modelscope.models.cv.image_try_on.warping": ["numpy", "torch", "cv2", "math", "collections"], "modelscope.models.cv.image_try_on.generator": ["functools", "torchvision", "torch", "os"], "modelscope.models.cv.image_try_on.try_on_infer": ["PIL", "numpy", "cv2", "os", "argparse", "yaml", "torch", "torchvision"], "modelscope.models.cv.image_try_on.landmark": ["logging", "torch", "os"], "modelscope.models.cv.image_colorization.unet.utils": ["functools", "torch", "enum"], "modelscope.models.cv.image_colorization.unet.unet": ["numpy", "torch"], "modelscope.models.cv.image_colorization.ddcolor.ddcolor_for_image_colorization": ["numpy", "copy", "os", "typing", "torch"], "modelscope.models.cv.image_colorization.ddcolor.loss": ["torch"], "modelscope.models.cv.image_colorization.ddcolor.utils.position_encoding": ["torch", "math"], "modelscope.models.cv.image_colorization.ddcolor.utils.convnext": ["timm", "torch"], "modelscope.models.cv.image_colorization.ddcolor.utils.transformer_utils": ["typing", "torch"], "modelscope.models.cv.image_colorization.ddcolor.utils.vgg": ["collections", "torchvision", "torch", "os"], "modelscope.models.cv.image_colorization.ddcolor.utils.unet": ["collections", "torch", "enum"], "modelscope.models.cv.image_colorization.ddcolor.ddcolor": ["torch"], "modelscope.models.cv.shop_segmentation.shop_seg_model": ["PIL", "numpy", "os", "typing", "torch", "json"], "modelscope.models.cv.shop_segmentation.neck_fpn": ["mmcv", "timm", "torch"], "modelscope.models.cv.shop_segmentation.utils": ["html", "regex", "os", "functools", "ftfy", "typing", "torch", "gzip"], "modelscope.models.cv.shop_segmentation.head_fpn": ["mmcv", "timm", "numpy", "torch"], "modelscope.models.cv.shop_segmentation.models": ["collections", "timm", "torch", "math"], 
"modelscope.models.cv.shop_segmentation.common": ["torch", "warnings"], "modelscope.models.cv.shop_segmentation.shop_seg_base": ["torch"], "modelscope.models.cv.language_guided_video_summarization.summarizer": ["numpy", "os", "argparse", "bmt_clipit", "typing", "torch", "videofeatures_clipit"], "modelscope.models.cv.language_guided_video_summarization.transformer.sub_layers": ["numpy", "torch"], "modelscope.models.cv.language_guided_video_summarization.transformer.models": ["numpy", "torch"], "modelscope.models.cv.language_guided_video_summarization.transformer.modules": ["torch"], "modelscope.models.cv.language_guided_video_summarization.transformer.layers": ["torch"], "modelscope.models.cv.video_frame_interpolation.VFINet_arch": ["torch"], "modelscope.models.cv.video_frame_interpolation.flow_model.corr": ["torch"], "modelscope.models.cv.video_frame_interpolation.flow_model.update": ["torch"], "modelscope.models.cv.video_frame_interpolation.flow_model.extractor": ["torch"], "modelscope.models.cv.video_frame_interpolation.flow_model.raft": ["numpy", "torch"], "modelscope.models.cv.video_frame_interpolation.utils.scene_change_detection": ["numpy", "torch"], "modelscope.models.cv.video_frame_interpolation.utils.utils": ["scipy", "numpy", "torch"], "modelscope.models.cv.video_frame_interpolation.interp_model.flow_reversal": ["torch"], "modelscope.models.cv.video_frame_interpolation.interp_model.refinenet_arch": ["numpy", "torch"], "modelscope.models.cv.video_frame_interpolation.interp_model.transformer_layers": ["timm", "sys", "torch", "math", "functools"], "modelscope.models.cv.video_frame_interpolation.interp_model.UNet": ["torch"], "modelscope.models.cv.video_frame_interpolation.interp_model.IFNet_swin": ["timm", "numpy", "torch"], "modelscope.models.cv.video_frame_interpolation.VFINet_for_video_frame_interpolation": ["typing", "copy", "torch", "os"], "modelscope.models.cv.human_reconstruction.Reconstruction": ["PIL", "numpy", "cv2", "os", "skimage", "typing", "torch", "torchvision"], "modelscope.models.cv.human_reconstruction.utils": ["numpy", "torch", "mcubes", "os"], "modelscope.models.cv.human_reconstruction.models.Surface_head": ["torch"], "modelscope.models.cv.human_reconstruction.models.Res_backbone": ["numpy", "torch"], "modelscope.models.cv.human_reconstruction.models.Embedding": ["torch"], "modelscope.models.cv.human_reconstruction.models.detectors": ["numpy", "torch"], "modelscope.models.cv.human_reconstruction.models.human_segmenter": ["numpy", "cv2", "tensorflow"], "modelscope.models.cv.human_reconstruction.models.networks": ["numpy", "functools", "torch"], "modelscope.models.cv.human_reconstruction.models.geometry": ["torch"], "modelscope.models.cv.human_reconstruction.models.PixToMesh": ["torch"], "modelscope.models.cv.virual_tryon.sdafnet": ["numpy", "torch", "random"], "modelscope.models.cv.robust_image_classification.easyrobust_model": ["torch", "os"], "modelscope.models.cv.image_debanding.rrdb.rrdb_image_debanding": ["typing", "torch", "os"], "modelscope.models.cv.vision_efficient_tuning.head": ["torch"], "modelscope.models.cv.vision_efficient_tuning.model": ["typing", "torch"], "modelscope.models.cv.vision_efficient_tuning.timm_weight_init": ["torch", "math", "warnings"], "modelscope.models.cv.vision_efficient_tuning.backbone": ["functools", "torch"], "modelscope.models.cv.vision_efficient_tuning.petl": ["collections", "torch", "math", "torchvision"], "modelscope.models.cv.vision_efficient_tuning.vision_efficient_tuning": ["collections", "torch", "os"], 
"modelscope.models.cv.vision_efficient_tuning.timm_helpers": ["itertools", "typing", "torch", "math"], "modelscope.models.cv.vision_efficient_tuning.timm_vision_transformer": ["logging", "itertools", "math", "functools", "torch", "collections"], "modelscope.models.cv.panorama_depth_estimation.unifuse_model": ["numpy", "torchvision", "torch", "os"], "modelscope.models.cv.panorama_depth_estimation.networks.equi": ["collections", "numpy", "torch", "__future__"], "modelscope.models.cv.panorama_depth_estimation.networks.resnet": ["torch"], "modelscope.models.cv.panorama_depth_estimation.networks.mobilenet": ["torch"], "modelscope.models.cv.panorama_depth_estimation.networks.unifuse": ["collections", "numpy", "torch", "__future__"], "modelscope.models.cv.panorama_depth_estimation.networks.layers": ["numpy", "torch"], "modelscope.models.cv.panorama_depth_estimation.networks.util": ["scipy", "numpy", "cv2"], "modelscope.models.cv.vidt.fpn_fusion": ["torch"], "modelscope.models.cv.vidt.head": ["copy", "torch", "math"], "modelscope.models.cv.vidt.deformable_transformer": ["timm", "copy", "torch", "math", "warnings"], "modelscope.models.cv.vidt.model": ["torch", "os"], "modelscope.models.cv.vidt.backbone": ["timm", "numpy", "torch", "math", "os"], "modelscope.models.cv.video_multi_object_tracking.utils.image": ["numpy", "cv2"], "modelscope.models.cv.video_multi_object_tracking.utils.kalman_filter": ["scipy", "numpy"], "modelscope.models.cv.video_multi_object_tracking.utils.utils": ["numpy", "torch", "cv2"], "modelscope.models.cv.video_multi_object_tracking.utils.visualization": ["numpy", "cv2"], "modelscope.models.cv.video_multi_object_tracking.models.model": ["torch"], "modelscope.models.cv.video_multi_object_tracking.models.yolo": ["copy", "torch", "math"], "modelscope.models.cv.video_multi_object_tracking.models.decode": ["torch"], "modelscope.models.cv.video_multi_object_tracking.models.common": ["torch"], "modelscope.models.cv.video_multi_object_tracking.tracker.multitracker": ["collections", "numpy", "torch"], "modelscope.models.cv.video_multi_object_tracking.tracker.basetrack": ["collections", "numpy"], "modelscope.models.cv.video_multi_object_tracking.tracker.matching": ["scipy", "numpy", "lap"], "modelscope.models.cv.video_single_object_tracking.utils.utils": ["numpy", "typing", "torch", "cv2", "math"], "modelscope.models.cv.video_single_object_tracking.models.ostrack.vit_ce": ["timm", "functools", "torch"], "modelscope.models.cv.video_single_object_tracking.models.ostrack.ostrack": ["torch"], "modelscope.models.cv.video_single_object_tracking.models.ostrack.utils": ["torch"], "modelscope.models.cv.video_single_object_tracking.models.ostrack.base_backbone": ["timm", "torch"], "modelscope.models.cv.video_single_object_tracking.models.procontext.vit_ce": ["timm", "functools", "torch"], "modelscope.models.cv.video_single_object_tracking.models.procontext.utils": ["torch"], "modelscope.models.cv.video_single_object_tracking.models.procontext.procontext": ["torch"], "modelscope.models.cv.video_single_object_tracking.models.layers.head": ["torch"], "modelscope.models.cv.video_single_object_tracking.models.layers.patch_embed": ["timm", "torch"], "modelscope.models.cv.video_single_object_tracking.models.layers.attn_blocks": ["timm", "torch", "math"], "modelscope.models.cv.video_single_object_tracking.models.layers.attn": ["torch"], "modelscope.models.cv.video_single_object_tracking.tracker.ostrack": ["torch"], "modelscope.models.cv.video_single_object_tracking.tracker.procontext": ["copy", "torch"], 
"modelscope.models.cv.video_single_object_tracking.config.ostrack": ["easydict"], "modelscope.models.cv.image_quality_assessment_mos.censeo_ivqa_model": ["torch"], "modelscope.models.cv.image_quality_assessment_mos.backbones.resnet": ["torch", "os"], "modelscope.models.cv.image_quality_assessment_mos.heads.simple_head": ["torch"], "modelscope.models.cv.image_quality_assessment_mos.image_quality_assessment_mos": ["typing", "torch", "os"], "modelscope.models.cv.video_stabilization.utils.ProjectionUtils": ["numpy", "torch", "math", "cv2"], "modelscope.models.cv.video_stabilization.utils.WarpUtils": ["tqdm", "numpy", "torch"], "modelscope.models.cv.video_stabilization.utils.RAFTUtils": ["scipy", "numpy", "torch"], "modelscope.models.cv.video_stabilization.utils.math_utils": ["numpy", "torch"], "modelscope.models.cv.video_stabilization.utils.image_utils": ["torch", "skimage"], "modelscope.models.cv.video_stabilization.utils.IterativeSmooth": ["numpy", "torch", "math", "os"], "modelscope.models.cv.video_stabilization.utils.MedianFilter": ["numpy", "torch", "math", "cv2"], "modelscope.models.cv.video_stabilization.DUTRAFTStabilizer": ["numpy", "tempfile", "cv2", "math", "os", "sys", "typing", "torch"], "modelscope.models.cv.video_stabilization.DUT.config": ["__future__", "easydict"], "modelscope.models.cv.video_stabilization.DUT.Smoother": ["numpy", "torch", "math"], "modelscope.models.cv.video_stabilization.DUT.rf_det_module": ["torch"], "modelscope.models.cv.video_stabilization.DUT.RAFT.corr": ["torch", "alt_cuda_corr"], "modelscope.models.cv.video_stabilization.DUT.RAFT.update": ["torch"], "modelscope.models.cv.video_stabilization.DUT.RAFT.extractor": ["torch"], "modelscope.models.cv.video_stabilization.DUT.RAFT.raft": ["numpy", "torch"], "modelscope.models.cv.video_stabilization.DUT.MotionPro": ["numpy", "math", "cv2", "os", "torch"], "modelscope.models.cv.video_stabilization.DUT.rf_det_so": ["torch"], "modelscope.models.cv.video_stabilization.DUT.DUT_raft": ["numpy", "sys", "torch", "cv2"], "modelscope.models.cv.action_detection.action_detection_onnx": ["onnxruntime", "numpy", "tempfile", "cv2", "os", "shutil", "subprocess", "uuid", "urllib"], "modelscope.models.cv.action_detection.modules.resnet": ["detectron2", "torch"], "modelscope.models.cv.action_detection.modules.action_detection_pytorch": ["logging", "fvcore", "detectron2", "typing", "torch"], "modelscope.models.cv.image_portrait_enhancement.losses.losses": ["torch"], "modelscope.models.cv.image_portrait_enhancement.losses.model_irse": ["torch"], "modelscope.models.cv.image_portrait_enhancement.losses.helpers": ["collections", "torch"], "modelscope.models.cv.image_portrait_enhancement.align_faces": ["numpy", "cv2", "skimage"], "modelscope.models.cv.image_portrait_enhancement.image_portrait_enhancement": ["typing", "torch", "math", "os"], "modelscope.models.cv.image_portrait_enhancement.eqface.model_resnet": ["torch"], "modelscope.models.cv.image_portrait_enhancement.eqface.fqa": ["numpy", "torch", "cv2", "os"], "modelscope.models.cv.image_portrait_enhancement.gpen": ["operator", "itertools", "math", "functools", "random", "torch"], "modelscope.models.cv.image_portrait_enhancement.retinaface.utils": ["numpy", "torch", "itertools", "math"], "modelscope.models.cv.image_portrait_enhancement.retinaface.models.net": ["time", "torch", "torchvision"], "modelscope.models.cv.image_portrait_enhancement.retinaface.models.retinaface": ["collections", "torch", "torchvision"], "modelscope.models.cv.image_portrait_enhancement.retinaface.detection": 
["numpy", "torch", "cv2", "os"], "modelscope.models.cv.action_recognition.tada_convnext": ["torch", "math"], "modelscope.models.cv.action_recognition.temporal_patch_shift_transformer": ["timm", "numpy", "operator", "einops", "functools", "abc", "torch", "torchvision"], "modelscope.models.cv.action_recognition.models": ["torch"], "modelscope.models.cv.action_recognition.s3dg": ["torch"], "modelscope.models.cv.facial_expression_recognition.fer.facial_expression_recognition": ["PIL", "numpy", "cv2", "os", "torch"], "modelscope.models.cv.facial_expression_recognition.fer.transforms": ["PIL", "numpy", "torch", "numbers", "types"], "modelscope.models.cv.facial_expression_recognition.fer.vgg": ["torch"], "modelscope.models.cv.product_retrieval_embedding.item_embedding": ["numpy", "torch", "cv2"], "modelscope.models.cv.product_retrieval_embedding.item_detection": ["numpy", "cv2"], "modelscope.models.cv.product_retrieval_embedding.item_model": ["numpy", "torch", "typing", "os"], "modelscope.models.cv.video_inpainting.inpainting_model": ["numpy", "torch", "math", "torchvision"], "modelscope.models.cv.video_inpainting.inpainting": ["PIL", "numpy", "torch", "cv2", "time", "os", "torchvision"], "modelscope.models.cv.image_mvs_depth_estimation.module": ["torch"], "modelscope.models.cv.image_mvs_depth_estimation.depth_filter": ["PIL", "numpy", "cv2", "os", "plyfile"], "modelscope.models.cv.image_mvs_depth_estimation.colmap2mvsnet": ["numpy", "__future__", "multiprocessing", "os", "cv2", "shutil", "functools", "struct", "collections"], "modelscope.models.cv.image_mvs_depth_estimation.utils": ["numpy", "torch", "random", "torchvision"], "modelscope.models.cv.image_mvs_depth_estimation.cas_mvsnet": ["torch"], "modelscope.models.cv.image_mvs_depth_estimation.general_eval_dataset": ["PIL", "numpy", "sys", "torch", "cv2", "os", "re"], "modelscope.models.cv.image_mvs_depth_estimation.casmvs_model": ["numpy", "cv2", "os", "torch", "easydict"], "modelscope.models.cv.abnormal_object_detection.mmdet_model": ["numpy", "torch", "os"], "modelscope.models.cv.abnormal_object_detection.mmdet_ms.roi_head.roi_extractors.single_level_roi_extractor": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.abnormal_object_detection.mmdet_ms.roi_head.mask_scoring_roi_head": ["mmdet", "torch"], "modelscope.models.cv.image_body_reshaping.pose_estimator.body": ["numpy", "math", "cv2", "torch", "scipy"], "modelscope.models.cv.image_body_reshaping.pose_estimator.model": ["collections", "torch"], "modelscope.models.cv.image_body_reshaping.pose_estimator.util": ["numpy"], "modelscope.models.cv.image_body_reshaping.model": ["torch"], "modelscope.models.cv.image_body_reshaping.image_body_reshaping": ["numpy", "cv2", "os", "typing", "torch"], "modelscope.models.cv.image_body_reshaping.person_info": ["numpy", "copy", "torch", "cv2"], "modelscope.models.cv.image_body_reshaping.slim_utils": ["numpy", "numba", "torch", "cv2", "math", "os", "random"], "modelscope.models.cv.tinynas_classfication.master_net": ["torch"], "modelscope.models.cv.tinynas_classfication.super_blocks": ["torch", "uuid"], "modelscope.models.cv.tinynas_classfication.basic_blocks": ["numpy", "torch", "uuid"], "modelscope.models.cv.tinynas_classfication.super_res_k1kxk1": ["torch", "uuid"], "modelscope.models.cv.tinynas_classfication.model_zoo": [], "modelscope.models.cv.tinynas_classfication.global_utils": [], "modelscope.models.cv.tinynas_classfication.super_res_kxkx": ["torch", "uuid"], "modelscope.models.cv.tinynas_classfication.plain_net_utils": ["torch"], 
"modelscope.models.cv.tinynas_classfication.super_res_idwexkx": ["torch", "uuid"], "modelscope.models.cv.crowd_counting.hrnet_aspp_relu": ["logging", "numpy", "os", "functools", "torch"], "modelscope.models.cv.crowd_counting.cc_model": ["typing", "torch", "os"], "modelscope.models.cv.image_probing_model.model": ["typing", "json", "torch", "os"], "modelscope.models.cv.image_probing_model.backbone": ["PIL", "numpy", "operator", "math", "functools", "sys", "torch", "collections", "torchvision"], "modelscope.models.cv.image_probing_model.utils": ["re", "torch"], "modelscope.models.cv.salient_detection.models.u2net": ["torch"], "modelscope.models.cv.salient_detection.models.utils": ["torch"], "modelscope.models.cv.salient_detection.models.senet": ["torch"], "modelscope.models.cv.salient_detection.models.modules": ["torch"], "modelscope.models.cv.salient_detection.models.backbone.Res2Net_v1b": ["torch", "math"], "modelscope.models.cv.salient_detection.salient_model": ["PIL", "cv2", "os", "torch", "torchvision"], "modelscope.models.cv.table_recognition.model_lore": ["numpy", "copy", "math", "os", "typing", "torch"], "modelscope.models.cv.table_recognition.modules.lore_processor": ["numpy", "copy", "torch", "math", "os"], "modelscope.models.cv.table_recognition.modules.lore_detector": ["numpy", "copy", "torch", "math", "os"], "modelscope.models.cv.table_recognition.lineless_table_process": ["shapely", "numpy", "torch", "cv2"], "modelscope.models.cv.image_reid_person.transreid_model": ["collections", "functools", "torch", "itertools"], "modelscope.models.cv.image_reid_person.pass_model": ["torch", "enum", "os"], "modelscope.models.cv.video_panoptic_segmentation.video_k_net": ["mmcv", "numpy", "mmdet", "torch"], "modelscope.models.cv.video_panoptic_segmentation.track.quasi_dense_embed_tracker": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.video_panoptic_segmentation.visualizer": ["hashlib", "numpy", "cv2"], "modelscope.models.cv.video_panoptic_segmentation.neck.fpn": ["mmcv", "torch"], "modelscope.models.cv.video_panoptic_segmentation.head.mask": ["numpy", "__future__", "pycocotools", "torch", "cv2"], "modelscope.models.cv.video_panoptic_segmentation.head.kernel_updator": ["mmcv", "torch"], "modelscope.models.cv.video_panoptic_segmentation.head.kernel_iter_head": ["mmdet", "torch"], "modelscope.models.cv.video_panoptic_segmentation.head.kernel_head": ["mmcv", "torch"], "modelscope.models.cv.video_panoptic_segmentation.head.kernel_update_head": ["mmcv", "numpy", "mmdet", "torch"], "modelscope.models.cv.video_panoptic_segmentation.head.track_heads": ["mmcv", "numpy", "torch"], "modelscope.models.cv.video_panoptic_segmentation.head.semantic_fpn_wrapper": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.video_panoptic_segmentation.backbone.swin_transformer": ["timm", "numpy", "mmdet", "torch"], "modelscope.models.cv.video_panoptic_segmentation.backbone.swin_checkpoint": ["pkgutil", "importlib", "torch", "os", "collections", "torchvision"], "modelscope.models.cv.face_recognition.torchkit.rts_backbone": ["collections", "torch", "math", "os"], "modelscope.models.cv.face_recognition.torchkit.backbone.model_resnet": ["torch"], "modelscope.models.cv.face_recognition.torchkit.backbone.arcface_backbone": ["torch"], "modelscope.models.cv.face_recognition.torchkit.backbone.model_irse": ["collections", "torch"], "modelscope.models.cv.face_recognition.torchkit.backbone.facemask_backbone": ["collections", "torch"], "modelscope.models.cv.face_recognition.torchkit.backbone.common": ["torch"], 
"modelscope.models.cv.face_recognition.align_face": ["numpy", "cv2", "skimage"], "modelscope.models.cv.body_3d_keypoints.cannonical_pose.canonical_pose_modules": ["torch"], "modelscope.models.cv.body_3d_keypoints.cannonical_pose.body_3d_pose": ["logging", "numpy", "os", "typing", "torch"], "modelscope.models.cv.body_3d_keypoints.hdformer.hdformer": ["torch"], "modelscope.models.cv.body_3d_keypoints.hdformer.backbone": ["torch"], "modelscope.models.cv.body_3d_keypoints.hdformer.skeleton": ["numpy"], "modelscope.models.cv.body_3d_keypoints.hdformer.directed_graph": ["numpy", "sys", "typing"], "modelscope.models.cv.body_3d_keypoints.hdformer.hdformer_detector": ["numpy", "torch", "typing", "os"], "modelscope.models.cv.body_3d_keypoints.hdformer.block": ["torch", "einops", "math"], "modelscope.models.cv.pedestrian_attribute_recognition.model": ["numpy", "torchvision", "torch", "os"], "modelscope.models.cv.image_instance_segmentation.postprocess_utils": ["numpy", "pycocotools", "itertools", "cv2", "torch"], "modelscope.models.cv.image_instance_segmentation.maskdino_model": ["typing", "torch", "os"], "modelscope.models.cv.image_instance_segmentation.model": ["typing", "torch", "os"], "modelscope.models.cv.image_instance_segmentation.backbones.swin_transformer": ["timm", "numpy", "torch"], "modelscope.models.cv.image_instance_segmentation.backbones.resnet": ["torch"], "modelscope.models.cv.image_instance_segmentation.fastinst.fastinst_decoder": ["torch", "math"], "modelscope.models.cv.image_instance_segmentation.fastinst.fastinst_encoder": ["logging", "typing", "torch"], "modelscope.models.cv.image_instance_segmentation.fastinst_model": ["typing", "torch", "os"], "modelscope.models.cv.image_instance_segmentation.maskdino.position_encoding": ["torch", "math"], "modelscope.models.cv.image_instance_segmentation.maskdino.maskdino_decoder": ["torch"], "modelscope.models.cv.image_instance_segmentation.maskdino.maskdino_encoder": ["numpy", "torch", "typing"], "modelscope.models.cv.image_instance_segmentation.maskdino.ms_deform_attn": ["__future__", "torch", "math", "warnings", "mmcv"], "modelscope.models.cv.image_instance_segmentation.maskdino.utils": ["copy", "torch", "math"], "modelscope.models.cv.image_instance_segmentation.maskdino.dino_decoder": ["typing", "torch"], "modelscope.models.cv.image_instance_segmentation.cascade_mask_rcnn_swin": ["collections", "torch", "os"], "modelscope.models.cv.image_instance_segmentation.datasets.transforms": ["numpy", "os"], "modelscope.models.cv.image_instance_segmentation.maskdino_swin": ["torch", "os"], "modelscope.models.cv.image_depth_estimation_bts.depth_estimation_bts_model": ["torch", "os"], "modelscope.models.cv.image_depth_estimation_bts.networks.encoder": ["torch", "torchvision"], "modelscope.models.cv.image_depth_estimation_bts.networks.bts_model": ["torch"], "modelscope.models.cv.image_depth_estimation_bts.networks.utils": ["torch", "math"], "modelscope.models.cv.image_depth_estimation_bts.networks.decoder": ["torch"], "modelscope.models.cv.product_segmentation.seg_infer": ["PIL", "numpy", "torch", "cv2"], "modelscope.models.cv.product_segmentation.net": ["torch"], "modelscope.models.cv.image_driving_perception.preprocessor": ["numpy", "typing", "torch", "cv2"], "modelscope.models.cv.image_driving_perception.image_driving_percetion_model": ["numpy", "cv2", "os", "typing", "torch"], "modelscope.models.cv.image_driving_perception.utils": ["numpy", "torchvision", "torch", "time"], "modelscope.models.cv.face_detection.scrfd.scrfd_detect": ["numpy", "copy", 
"os", "typing", "torch"], "modelscope.models.cv.face_detection.scrfd.tinymog_detect": ["typing", "copy", "torch", "os"], "modelscope.models.cv.face_detection.scrfd.preprocessor": ["PIL", "numpy", "typing"], "modelscope.models.cv.face_detection.scrfd.damofd_detect": ["typing", "copy", "torch", "os"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.core.bbox.transforms": ["numpy", "torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.core.post_processing.bbox_nms": ["torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.detectors.single_stage": ["mmdet", "torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.detectors.tinymog": ["mmdet", "torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.detectors.scrfd": ["mmdet", "torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.detectors.base": ["numpy", "mmdet", "torch", "mmcv", "collections", "abc"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.dense_heads.scrfd_head": ["mmcv", "numpy", "mmdet", "torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.backbones.master_net": ["mmdet", "torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.backbones.resnet": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.models.backbones.mobilenet": ["mmcv", "mmdet", "torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.formating": ["mmcv", "numpy", "mmdet", "torch"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.auto_augment": ["numpy", "copy", "mmdet", "cv2", "mmcv"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.loading": ["numpy", "mmdet", "pycocotools", "os"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.pipelines.transforms": ["mmcv", "numpy", "mmdet"], "modelscope.models.cv.face_detection.scrfd.mmdet_patch.datasets.retinaface": ["numpy", "mmdet"], "modelscope.models.cv.face_detection.mogface.models.mogprednet": ["torch", "math"], "modelscope.models.cv.face_detection.mogface.models.detectors": ["numpy", "torch", "cv2", "os"], "modelscope.models.cv.face_detection.mogface.models.resnet": ["torch"], "modelscope.models.cv.face_detection.mogface.models.utils": ["itertools", "numpy", "torch", "math"], "modelscope.models.cv.face_detection.mogface.models.mogface": ["torch"], "modelscope.models.cv.face_detection.mtcnn.models.get_nets": ["collections", "numpy", "torch"], "modelscope.models.cv.face_detection.mtcnn.models.detector": ["PIL", "numpy", "torch", "os"], "modelscope.models.cv.face_detection.mtcnn.models.box_utils": ["PIL", "numpy"], "modelscope.models.cv.face_detection.mtcnn.models.first_stage": ["PIL", "numpy", "torch", "math"], "modelscope.models.cv.face_detection.ulfd_slim.vision.mb_tiny": ["torch"], "modelscope.models.cv.face_detection.ulfd_slim.vision.ssd.predictor": ["torch"], "modelscope.models.cv.face_detection.ulfd_slim.vision.ssd.mb_tiny_fd": ["torch"], "modelscope.models.cv.face_detection.ulfd_slim.vision.ssd.ssd": ["collections", "numpy", "torch", "typing"], "modelscope.models.cv.face_detection.ulfd_slim.vision.ssd.data_preprocessing": [], "modelscope.models.cv.face_detection.ulfd_slim.vision.ssd.fd_config": ["numpy"], "modelscope.models.cv.face_detection.ulfd_slim.vision.transforms": ["numpy", "torch", "cv2", "types"], "modelscope.models.cv.face_detection.ulfd_slim.vision.box_utils": ["torch", "math"], "modelscope.models.cv.face_detection.ulfd_slim.detection": ["numpy", 
"torch", "cv2", "os"], "modelscope.models.cv.face_detection.peppa_pig_face.LK.lk": ["numpy"], "modelscope.models.cv.face_detection.peppa_pig_face.face_detector": ["numpy", "cv2", "tensorflow"], "modelscope.models.cv.face_detection.peppa_pig_face.face_landmark": ["numpy", "cv2", "tensorflow"], "modelscope.models.cv.face_detection.peppa_pig_face.facer": ["numpy", "cv2"], "modelscope.models.cv.face_detection.retinaface.utils": ["numpy", "torch", "itertools", "math"], "modelscope.models.cv.face_detection.retinaface.models.net": ["time", "torch", "torchvision"], "modelscope.models.cv.face_detection.retinaface.models.retinaface": ["collections", "torch", "torchvision"], "modelscope.models.cv.face_detection.retinaface.detection": ["numpy", "torch", "cv2"], "modelscope.models.cv.video_deinterlace.UNet_for_video_deinterlace": ["typing", "copy", "torch", "os"], "modelscope.models.cv.video_deinterlace.models.enh": ["torch"], "modelscope.models.cv.video_deinterlace.models.utils": ["torch"], "modelscope.models.cv.video_deinterlace.models.archs": ["numpy", "torch"], "modelscope.models.cv.video_deinterlace.models.deep_fourier_upsampling": ["numpy", "torch"], "modelscope.models.cv.video_deinterlace.models.fre": ["torch"], "modelscope.models.cv.video_deinterlace.deinterlace_arch": ["torch"], "modelscope.models.cv.facial_landmark_confidence.flc.facial_landmark_confidence": ["PIL", "numpy", "cv2", "os", "torch"], "modelscope.models.cv.facial_landmark_confidence.flc.manual_landmark_net": ["torch", "math"], "modelscope.models.cv.tinynas_detection.damo.structures.image_list": ["__future__", "torch"], "modelscope.models.cv.tinynas_detection.damo.structures.boxlist_ops": ["torch"], "modelscope.models.cv.tinynas_detection.damo.structures.bounding_box": ["torch"], "modelscope.models.cv.tinynas_detection.damo.detectors.detector": ["torch"], "modelscope.models.cv.tinynas_detection.damo.augmentations.scale_aware_aug": ["copy"], "modelscope.models.cv.tinynas_detection.damo.augmentations.box_level_augs.box_level_augs": ["numpy", "random"], "modelscope.models.cv.tinynas_detection.damo.augmentations.box_level_augs.gaussian_maps": ["torch", "math"], "modelscope.models.cv.tinynas_detection.damo.augmentations.box_level_augs.geometric_augs": ["copy", "torch", "random", "torchvision"], "modelscope.models.cv.tinynas_detection.damo.augmentations.box_level_augs.color_augs": ["torch", "random"], "modelscope.models.cv.tinynas_detection.damo.apis.detector_evaluater": ["torch", "os"], "modelscope.models.cv.tinynas_detection.damo.apis.detector_inference": ["tqdm", "torch", "os"], "modelscope.models.cv.tinynas_detection.damo.base_models.losses.gfocal_loss": ["functools", "torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.losses.distill_loss": ["torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.core.ops": ["numpy", "torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.core.neck_ops": ["numpy", "torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.core.weight_init": ["numpy", "torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.core.repvgg_block": ["numpy", "torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.core.base_ops": ["torch", "math"], "modelscope.models.cv.tinynas_detection.damo.base_models.core.utils": ["functools", "torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.core.ota_assigner": ["torch", "warnings"], "modelscope.models.cv.tinynas_detection.damo.base_models.backbones.tinynas_csp": ["torch"], 
"modelscope.models.cv.tinynas_detection.damo.base_models.backbones.tinynas_res": ["torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.backbones.darknet": ["torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.heads.gfocal_v2_tiny": ["numpy", "functools", "torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.heads.zero_head": ["torch"], "modelscope.models.cv.tinynas_detection.damo.base_models.necks.giraffe_fpn": ["timm", "numpy", "math", "functools", "typing", "torch", "collections"], "modelscope.models.cv.tinynas_detection.damo.base_models.necks.giraffe_config": ["networkx", "collections"], "modelscope.models.cv.tinynas_detection.damo.base_models.necks.giraffe_fpn_btn": ["torch"], "modelscope.models.cv.tinynas_detection.damo.utils.boxes": ["numpy", "torch", "torchvision"], "modelscope.models.cv.tinynas_detection.damo.utils.scheduler": ["math"], "modelscope.models.cv.tinynas_detection.damo.utils.model_utils": ["copy", "torch", "math", "time", "thop"], "modelscope.models.cv.tinynas_detection.tinynas_detector": [], "modelscope.models.cv.tinynas_detection.tinynas_damoyolo": [], "modelscope.models.cv.tinynas_detection.detector": ["pickle", "torchvision", "torch", "os"], "modelscope.models.cv.tinynas_detection.utils": ["importlib", "sys", "tempfile", "os", "shutil", "easydict"], "modelscope.models.cv.face_emotion.efficient.model": ["torch"], "modelscope.models.cv.face_emotion.efficient.utils": ["torch", "math", "collections", "functools", "re"], "modelscope.models.cv.face_emotion.emotion_infer": ["PIL", "torch", "torchvision"], "modelscope.models.cv.face_emotion.face_alignment.face": ["numpy", "tensorflow", "cv2", "os"], "modelscope.models.cv.face_emotion.face_alignment.face_align": ["PIL", "numpy", "cv2", "os", "sys"], "modelscope.models.cv.face_emotion.emotion_model": ["sys", "torch", "os"], "modelscope.models.cv.image_human_parsing.m2fp_net": ["typing", "torch", "os"], "modelscope.models.cv.image_human_parsing.parsing_utils": ["PIL", "numpy", "copy", "torch"], "modelscope.models.cv.image_human_parsing.backbone.deeplab_resnet": ["numpy", "torch"], "modelscope.models.cv.image_human_parsing.m2fp.m2fp_decoder": ["torch"], "modelscope.models.cv.image_human_parsing.m2fp.m2fp_encoder": ["numpy", "torch", "typing"], "modelscope.models.cv.super_resolution.arch_util": ["warnings", "torch", "math", "itertools", "collections", "torchvision"], "modelscope.models.cv.super_resolution.ecb": ["torch"], "modelscope.models.cv.super_resolution.rrdbnet_arch": ["torch"], "modelscope.models.cv.super_resolution.ecbsr_model": ["typing", "torch", "os"], "modelscope.models.cv.s2net_panorama_depth_estimation.s2net_model": ["numpy", "torchvision", "torch", "os"], "modelscope.models.cv.s2net_panorama_depth_estimation.networks.util_helper": ["importlib", "numpy", "apex", "healpy", "os", "pkgutil", "torch", "warnings", "torchvision"], "modelscope.models.cv.s2net_panorama_depth_estimation.networks.swin_transformer": ["timm", "numpy", "torch"], "modelscope.models.cv.s2net_panorama_depth_estimation.networks.config": ["yacs", "yaml", "os"], "modelscope.models.cv.s2net_panorama_depth_estimation.networks.model": ["numpy", "torch"], "modelscope.models.cv.s2net_panorama_depth_estimation.networks.resnet": ["torch"], "modelscope.models.cv.s2net_panorama_depth_estimation.networks.decoder": ["numpy", "torch", "einops"], "modelscope.models.cv.skin_retouching.inpainting_model.gconv": ["torch"], "modelscope.models.cv.skin_retouching.inpainting_model.inpainting_unet": ["torch"], 
"modelscope.models.cv.skin_retouching.weights_init": ["torch"], "modelscope.models.cv.skin_retouching.utils": ["numpy", "typing", "einops", "cv2", "time", "torch"], "modelscope.models.cv.skin_retouching.detection_model.detection_unet_in": ["torch"], "modelscope.models.cv.skin_retouching.detection_model.detection_module": ["torch"], "modelscope.models.cv.skin_retouching.retinaface.utils": ["numpy", "typing", "pathlib", "torch", "cv2", "re"], "modelscope.models.cv.skin_retouching.retinaface.network": ["typing", "torch", "torchvision"], "modelscope.models.cv.skin_retouching.retinaface.prior_box": ["itertools", "torch", "math"], "modelscope.models.cv.skin_retouching.retinaface.predict_single": ["numpy", "typing", "albumentations", "torch", "torchvision"], "modelscope.models.cv.skin_retouching.retinaface.net": ["typing", "torch"], "modelscope.models.cv.skin_retouching.retinaface.box_utils": ["numpy", "torch", "typing"], "modelscope.models.cv.skin_retouching.unet_deploy": ["torch", "warnings"], "modelscope.models.cv.ocr_detection.model": ["numpy", "torch", "typing", "os"], "modelscope.models.cv.ocr_detection.preprocessor": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch"], "modelscope.models.cv.ocr_detection.utils": ["shapely", "numpy", "pyclipper", "cv2"], "modelscope.models.cv.ocr_detection.modules.dbnet": ["math", "os", "sys", "torch", "collections"], "modelscope.models.cv.ocr_detection.modules.mix_ops": ["numpy", "torch", "math"], "modelscope.models.cv.ocr_detection.modules.proxyless": ["numpy", "torch", "sys", "re"], "modelscope.models.cv.ocr_detection.modules.seg_detector_loss": ["sys", "torch"], "modelscope.models.cv.ocr_detection.modules.layers": ["collections", "numpy", "torch"], "modelscope.models.builder": [], "modelscope.models.science.unifold.config": ["typing", "copy", "ml_collections"], "modelscope.models.science.unifold.model": ["argparse", "typing", "torch", "os"], "modelscope.models.science.unifold.data.process_multimer": ["collections", "numpy", "typing"], "modelscope.models.science.unifold.data.data_ops": ["numpy", "operator", "itertools", "functools", "typing", "unicore", "torch"], "modelscope.models.science.unifold.data.residue_constants": ["numpy", "typing", "unicore", "os", "collections", "functools"], "modelscope.models.science.unifold.data.utils": ["numpy", "copy", "functools", "typing", "pickle", "scipy", "json", "gzip"], "modelscope.models.science.unifold.data.protein": ["numpy", "dataclasses", "typing", "Bio", "io"], "modelscope.models.science.unifold.data.msa_pairing": ["numpy", "collections", "pandas", "typing", "scipy"], "modelscope.models.science.unifold.data.process": ["numpy", "torch", "typing"], "modelscope.models.science.unifold.msa.mmcif": ["dataclasses", "typing", "absl", "Bio", "io", "collections", "functools"], "modelscope.models.science.unifold.msa.utils": ["typing", "absl", "json", "os"], "modelscope.models.science.unifold.msa.msa_identifiers": ["dataclasses", "typing", "re"], "modelscope.models.science.unifold.msa.templates": ["numpy", "absl", "os", "functools", "abc", "re", "dataclasses", "typing", "glob", "datetime"], "modelscope.models.science.unifold.msa.tools.hhblits": ["absl", "os", "subprocess", "typing", "glob"], "modelscope.models.science.unifold.msa.tools.hhsearch": ["absl", "os", "subprocess", "typing", "glob"], "modelscope.models.science.unifold.msa.tools.utils": ["typing", "absl", "tempfile", "time", "shutil", "contextlib"], "modelscope.models.science.unifold.msa.tools.jackhmmer": ["concurrent", "absl", "os", "subprocess", "typing", 
"glob", "urllib"], "modelscope.models.science.unifold.msa.tools.hmmbuild": ["absl", "re", "subprocess", "os"], "modelscope.models.science.unifold.msa.tools.hmmsearch": ["typing", "absl", "subprocess", "os"], "modelscope.models.science.unifold.msa.tools.kalign": ["typing", "absl", "subprocess", "os"], "modelscope.models.science.unifold.msa.pipeline": ["numpy", "absl", "typing", "os"], "modelscope.models.science.unifold.msa.parsers": ["dataclasses", "typing", "string", "itertools", "collections", "re"], "modelscope.models.science.unifold.modules.confidence": ["typing", "torch"], "modelscope.models.science.unifold.modules.attentions": ["unicore", "functools", "torch", "typing"], "modelscope.models.science.unifold.modules.structure_module": ["unicore", "typing", "torch", "math"], "modelscope.models.science.unifold.modules.evoformer": ["unicore", "functools", "torch", "typing"], "modelscope.models.science.unifold.modules.alphafold": ["unicore", "torch"], "modelscope.models.science.unifold.modules.triangle_multiplication": ["unicore", "functools", "torch", "typing"], "modelscope.models.science.unifold.modules.template": ["math", "functools", "unicore", "typing", "torch"], "modelscope.models.science.unifold.modules.embedders": ["unicore", "torch", "typing"], "modelscope.models.science.unifold.modules.frame": ["numpy", "torch", "typing", "__future__"], "modelscope.models.science.unifold.modules.auxillary_heads": ["unicore", "torch", "typing"], "modelscope.models.science.unifold.modules.common": ["unicore", "functools", "torch", "typing"], "modelscope.models.science.unifold.modules.featurization": ["unicore", "torch", "typing"], "modelscope.models.science.unifold.dataset": ["logging", "numpy", "copy", "os", "typing", "unicore", "torch", "json", "ml_collections"], "modelscope.metrics.image_instance_segmentation_metric": ["numpy", "pycocotools", "tempfile", "os", "typing", "collections"], "modelscope.metrics.ppl_metric": ["numpy", "typing", "torch", "math"], "modelscope.metrics.loss_metric": ["numpy", "sklearn", "typing"], "modelscope.metrics.image_inpainting_metric": ["scipy", "numpy", "torch", "typing"], "modelscope.metrics.map_metric": ["numpy", "typing"], "modelscope.metrics.video_super_resolution_metric.video_super_resolution_metric": ["numpy", "typing"], "modelscope.metrics.video_super_resolution_metric.metric_util": ["numpy"], "modelscope.metrics.video_super_resolution_metric.matlab_functions": ["numpy", "torch", "math"], "modelscope.metrics.video_super_resolution_metric.niqe": ["scipy", "numpy", "math", "cv2"], "modelscope.metrics.token_classification_metric": ["importlib", "numpy", "typing"], "modelscope.metrics.video_frame_interpolation_metric": ["numpy", "lpips", "math", "typing", "torch"], "modelscope.metrics.text_generation_metric": ["nltk", "contextlib", "rouge", "sys", "typing"], "modelscope.metrics.movie_scene_segmentation_metric": ["numpy", "typing"], "modelscope.metrics.ned_metric": ["numpy", "typing"], "modelscope.metrics.image_portrait_enhancement_metric": ["numpy", "typing", "cv2"], "modelscope.metrics.sequence_classification_metric": ["numpy", "sklearn", "typing"], "modelscope.metrics.image_denoise_metric": ["numpy", "typing", "torch", "cv2"], "modelscope.metrics.builder": ["typing"], "modelscope.metrics.translation_evaluation_metric": ["importlib", "typing", "pandas"], "modelscope.metrics.image_colorization_metric": ["numpy", "cv2", "typing", "torch", "scipy", "torchvision"], "modelscope.metrics.text_ranking_metric": ["numpy", "typing"], "modelscope.metrics.audio_noise_metric": 
["typing"], "modelscope.metrics.accuracy_metric": ["numpy", "typing"], "modelscope.metrics.image_quality_assessment_degradation_metric": ["scipy", "tqdm", "numpy", "tempfile", "cv2", "os", "sys", "typing", "torch", "collections"], "modelscope.metrics.image_color_enhance_metric": ["numpy", "typing", "cv2"], "modelscope.metrics.bleu_metric": ["typing", "sacrebleu", "itertools"], "modelscope.metrics.ciderD.ciderD_scorer": ["numpy", "pdb", "copy", "__future__", "math", "os", "six", "collections"], "modelscope.metrics.ciderD.ciderD": ["__future__"], "modelscope.metrics.action_detection_evaluator": ["logging", "scipy", "numpy", "copy", "os", "collections", "detectron2", "pandas"], "modelscope.metrics.prediction_saving_wrapper": ["numpy", "sklearn", "typing"], "modelscope.metrics.image_quality_assessment_mos_metric": ["tqdm", "numpy", "tempfile", "cv2", "os", "sys", "typing", "torch", "scipy"], "modelscope.metrics.referring_video_object_segmentation_metric": ["tqdm", "numpy", "pycocotools", "typing", "torch"], "modelscope.metrics.ocr_recognition_metric": ["edit_distance", "numpy", "torch", "typing"], "modelscope.metrics.video_stabilization_metric": ["tqdm", "numpy", "tempfile", "cv2", "os", "sys", "typing"], "modelscope.metrics.base": ["typing", "abc"], "modelscope.metrics.inbatch_recall_metric": ["numpy", "torch", "typing"], "modelscope.metrics.video_summarization_metric": ["numpy", "typing"], "modelscope.pipelines.nlp.dialog_state_tracking_pipeline": ["typing"], "modelscope.pipelines.nlp.distributed_gpt3_pipeline": ["typing", "torch"], "modelscope.pipelines.nlp.codegeex_code_translation_pipeline": ["typing"], "modelscope.pipelines.nlp.siamese_uie_pipeline": ["logging", "tqdm", "copy", "math", "os", "time", "typing", "pathlib", "torch", "scipy", "json"], "modelscope.pipelines.nlp.summarization_pipeline": ["typing", "torch"], "modelscope.pipelines.nlp.dialog_modeling_pipeline": ["typing"], "modelscope.pipelines.nlp.zero_shot_classification_pipeline": ["scipy", "typing", "torch"], "modelscope.pipelines.nlp.translation_evaluation_pipeline": ["numpy", "os", "typing", "torch", "enum"], "modelscope.pipelines.nlp.fid_dialogue_pipeline": ["re", "typing", "torch"], "modelscope.pipelines.nlp.document_grounded_dialog_rerank_pipeline": ["numpy", "os", "time", "pprint", "re", "random", "sys", "transformers", "typing", "torch", "collections", "ujson"], "modelscope.pipelines.nlp.text_classification_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.nlp.document_grounded_dialog_generate_pipeline": ["typing"], "modelscope.pipelines.nlp.machine_reading_comprehension_pipeline": ["typing", "torch"], "modelscope.pipelines.nlp.token_classification_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.nlp.translation_pipeline": ["numpy", "os", "tensorflow", "typing", "subword_nmt", "jieba", "sacremoses"], "modelscope.pipelines.nlp.sentence_embedding_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.nlp.faq_question_answering_pipeline": ["typing"], "modelscope.pipelines.nlp.feature_extraction_pipeline": ["typing", "torch", "os"], "modelscope.pipelines.nlp.distributed_gpt_moe_pipeline": ["typing", "torch"], "modelscope.pipelines.nlp.text_generation_pipeline": ["typing", "transformers", "torch", "os"], "modelscope.pipelines.nlp.conversational_text_to_sql_pipeline": ["text2sql_lgesql", "typing", "torch"], "modelscope.pipelines.nlp.llama2_text_generation_pipeline": ["typing", "torch"], "modelscope.pipelines.nlp.canmt_translation_pipeline": ["sacremoses", "typing", "torch", "os"], 
"modelscope.pipelines.nlp.information_extraction_pipeline": ["typing", "torch"], "modelscope.pipelines.nlp.extractive_summarization_pipeline": ["numpy", "datasets", "re", "typing", "torch"], "modelscope.pipelines.nlp.fasttext_text_classification_pipeline": ["numpy", "os", "sentencepiece", "typing", "fasttext"], "modelscope.pipelines.nlp.table_question_answering_pipeline": ["os", "typing", "transformers", "torch", "json"], "modelscope.pipelines.nlp.glm130b_text_generation_pipeline": ["typing"], "modelscope.pipelines.nlp.fill_mask_pipeline": ["numpy", "typing"], "modelscope.pipelines.nlp.automatic_post_editing_pipeline": ["numpy", "html", "os", "sentencepiece", "tensorflow", "typing", "jieba", "sacremoses"], "modelscope.pipelines.nlp.document_segmentation_pipeline": ["numpy", "datasets", "re", "typing", "torch"], "modelscope.pipelines.nlp.word_alignment_pipeline": ["numpy", "typing"], "modelscope.pipelines.nlp.translation_quality_estimation_pipeline": ["os", "typing", "transformers", "torch", "io"], "modelscope.pipelines.nlp.codegeex_code_generation_pipeline": ["typing"], "modelscope.pipelines.nlp.polylm_text_generation_pipeline": ["typing", "torch", "os"], "modelscope.pipelines.nlp.distributed_plug_pipeline": ["typing", "torch"], "modelscope.pipelines.nlp.text_ranking_pipeline": ["numpy", "typing"], "modelscope.pipelines.nlp.language_identification_pipline": ["numpy", "os", "re", "tensorflow", "typing"], "modelscope.pipelines.nlp.dialog_intent_prediction_pipeline": ["typing"], "modelscope.pipelines.nlp.word_segmentation_pipeline": ["typing", "torch"], "modelscope.pipelines.nlp.text_error_correction_pipeline": ["typing", "torch"], "modelscope.pipelines.nlp.mglm_text_summarization_pipeline": ["typing", "os"], "modelscope.pipelines.nlp.named_entity_recognition_pipeline": ["typing"], "modelscope.pipelines.nlp.document_grounded_dialog_retrieval_pipeline": ["numpy", "os", "typing", "json", "faiss"], "modelscope.pipelines.nlp.interactive_translation_pipeline": ["numpy", "os", "tensorflow", "typing", "subword_nmt", "jieba", "sacremoses"], "modelscope.pipelines.nlp.user_satisfaction_estimation_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.multi_modal.image_text_retrieval_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.soonet_video_temporal_grounding_pipeline": ["numpy", "os", "typing", "torch", "torchvision"], "modelscope.pipelines.multi_modal.video_multi_modal_embedding_pipeline": ["typing"], "modelscope.pipelines.multi_modal.multimodal_dialogue_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.document_vl_embedding_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.videocomposer_pipeline": ["PIL", "numpy", "tempfile", "cv2", "time", "os", "imageio", "functools", "subprocess", "random", "typing", "torch", "mvextractor", "torchvision"], "modelscope.pipelines.multi_modal.gridvlp_pipeline": ["PIL", "numpy", "time", "os", "traceback", "typing", "transformers", "torch", "json"], "modelscope.pipelines.multi_modal.text2sql_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.image_to_video_pipeline": ["tempfile", "cv2", "os", "einops", "subprocess", "typing", "torch"], "modelscope.pipelines.multi_modal.multi_modal_embedding_pipeline": ["typing"], "modelscope.pipelines.multi_modal.diffusers_wrapped.stable_diffusion.chinese_stable_diffusion_pipeline": ["PIL", "numpy", "cv2", "typing", "transformers", "torch", "diffusers"], "modelscope.pipelines.multi_modal.diffusers_wrapped.stable_diffusion.stable_diffusion_pipeline": ["PIL", 
"numpy", "cv2", "os", "typing", "torch", "diffusers", "torchvision"], "modelscope.pipelines.multi_modal.diffusers_wrapped.diffusers_pipeline": ["typing", "os"], "modelscope.pipelines.multi_modal.efficient_diffusion_tuning_pipeline": ["PIL", "numpy", "cv2", "typing", "torch", "torchvision"], "modelscope.pipelines.multi_modal.image_captioning_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.multi_modal.visual_entailment_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.team_multi_modal_similarity_pipeline": ["typing"], "modelscope.pipelines.multi_modal.text_to_image_synthesis_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.ocr_recognition_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.mgeo_ranking_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.multi_modal.video_to_video_pipeline": ["tempfile", "cv2", "os", "einops", "subprocess", "typing", "torch"], "modelscope.pipelines.multi_modal.disco_guided_diffusion_pipeline.disco_guided_diffusion": ["importlib", "numpy", "PIL", "math", "os", "gc", "cv2", "clip", "torch", "json", "torchvision"], "modelscope.pipelines.multi_modal.disco_guided_diffusion_pipeline.utils": ["numpy", "warnings", "torch", "math", "fractions"], "modelscope.pipelines.multi_modal.generative_multi_modal_embedding_pipeline": ["typing"], "modelscope.pipelines.multi_modal.visual_question_answering_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.text_to_video_synthesis_pipeline": ["einops", "tempfile", "cv2", "os", "typing", "torch"], "modelscope.pipelines.multi_modal.sudoku_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.video_question_answering_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.asr_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.visual_grounding_pipeline": ["typing", "torch"], "modelscope.pipelines.multi_modal.video_captioning_pipeline": ["typing", "torch"], "modelscope.pipelines.audio.speaker_verification_pipeline": ["shutil", "typing", "yaml", "os"], "modelscope.pipelines.audio.timestamp_pipeline": ["os", "typing", "yaml", "json", "funasr"], "modelscope.pipelines.audio.language_recognition_pipeline": ["numpy", "soundfile", "os", "torchaudio", "typing", "torch", "io"], "modelscope.pipelines.audio.speaker_diarization_dialogue_detection_pipeline": ["numpy", "typing"], "modelscope.pipelines.audio.kws_farfield_pipeline": ["numpy", "soundfile", "typing", "io", "wave"], "modelscope.pipelines.audio.lm_infer_pipeline": ["typing", "os"], "modelscope.pipelines.audio.speaker_change_locating_pipeline": ["numpy", "soundfile", "torchaudio", "typing", "torch", "io"], "modelscope.pipelines.audio.asr_wenet_inference_pipeline": ["typing"], "modelscope.pipelines.audio.ans_pipeline": ["numpy", "soundfile", "typing", "torch", "io", "librosa"], "modelscope.pipelines.audio.speaker_verification_light_pipeline": ["numpy", "soundfile", "os", "torchaudio", "typing", "torch", "io"], "modelscope.pipelines.audio.asr_inference_pipeline": ["typing", "json", "yaml", "os"], "modelscope.pipelines.audio.ans_dfsmn_pipeline": ["numpy", "soundfile", "os", "sys", "typing", "torch", "io", "collections", "librosa"], "modelscope.pipelines.audio.text_to_speech_pipeline": ["numpy", "typing"], "modelscope.pipelines.audio.inverse_text_processing_pipeline": ["shutil", "typing", "yaml", "os"], "modelscope.pipelines.audio.speaker_diarization_semantic_speaker_turn_detection_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.audio.kws_kwsbp_pipeline": 
["typing", "json", "os"], "modelscope.pipelines.audio.punctuation_processing_pipeline": ["shutil", "typing", "yaml", "os"], "modelscope.pipelines.audio.segmentation_clustering_pipeline": ["numpy", "soundfile", "torchaudio", "typing", "torch", "io"], "modelscope.pipelines.audio.separation_pipeline": ["numpy", "soundfile", "typing", "torch", "io"], "modelscope.pipelines.audio.speaker_verification_rdino_pipeline": ["io", "typing", "soundfile", "torch"], "modelscope.pipelines.audio.speaker_verification_eres2net_pipeline": ["io", "typing", "soundfile", "torch"], "modelscope.pipelines.audio.voice_activity_detection_pipeline": ["os", "typing", "yaml", "json", "funasr"], "modelscope.pipelines.audio.speaker_diarization_pipeline": ["numpy", "os", "shutil", "typing", "yaml", "json"], "modelscope.pipelines.audio.linear_aec_pipeline": ["importlib", "numpy", "os", "typing", "yaml", "torch", "scipy"], "modelscope.pipelines.cv.image_portrait_enhancement_pipeline": ["PIL", "numpy", "cv2", "math", "typing", "torch", "scipy"], "modelscope.pipelines.cv.vop_retrieval_pipeline": ["tqdm", "numpy", "math", "os", "random", "typing", "pickle", "torch", "collections", "gzip"], "modelscope.pipelines.cv.face_attribute_recognition_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.vision_efficient_tuning_pipeline": ["numpy", "torch", "typing", "torchvision"], "modelscope.pipelines.cv.tbs_detection_utils.utils": ["PIL", "numpy", "__future__", "os", "colorsys", "matplotlib", "torch", "pandas", "torchvision"], "modelscope.pipelines.cv.mask_face_recognition_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch", "collections"], "modelscope.pipelines.cv.image_bts_depth_estimation_pipeline": ["numpy", "cv2", "typing", "albumentations", "torch"], "modelscope.pipelines.cv.video_panoptic_segmentation_pipeline": ["tqdm", "numpy", "cv2", "os", "typing", "torch", "mmcv"], "modelscope.pipelines.cv.video_colorization_pipeline": ["PIL", "numpy", "tempfile", "cv2", "os", "subprocess", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.arc_face_recognition_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.action_detection_pipeline": ["typing", "math", "os"], "modelscope.pipelines.cv.image_paintbyexample_pipeline": ["PIL", "numpy", "einops", "cv2", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.table_recognition_pipeline": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch"], "modelscope.pipelines.cv.image_to_image_translation_pipeline": ["PIL", "numpy", "cv2", "os", "sys", "typing", "torch", "io", "torchvision"], "modelscope.pipelines.cv.content_check_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.image_classification_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.cv.video_human_matting_pipeline": ["moviepy", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.action_recognition_pipeline": ["typing", "torch", "math", "os"], "modelscope.pipelines.cv.image_face_fusion_pipeline": ["numpy", "typing"], "modelscope.pipelines.cv.vop_retrieval_se_pipeline": ["numpy", "os", "typing", "torch", "gzip"], "modelscope.pipelines.cv.image_mvs_depth_estimation_pipeline": ["shutil", "typing", "tempfile", "os"], "modelscope.pipelines.cv.realtime_video_object_detection_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch", "json", "torchvision"], "modelscope.pipelines.cv.pointcloud_sceneflow_estimation_pipeline": ["numpy", "torch", "typing", "plyfile"], 
"modelscope.pipelines.cv.hicossl_video_embedding_pipeline": ["typing", "torch", "math", "os"], "modelscope.pipelines.cv.image_deblur_pipeline": ["typing", "torch", "torchvision"], "modelscope.pipelines.cv.body_2d_keypoints_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch", "json", "torchvision"], "modelscope.pipelines.cv.maskdino_instance_segmentation_pipeline": ["typing", "torch", "torchvision"], "modelscope.pipelines.cv.face_image_generation_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.image_defrcn_fewshot_pipeline": ["numpy", "torch", "typing", "os"], "modelscope.pipelines.cv.video_stabilization_pipeline": ["numpy", "tempfile", "math", "os", "cv2", "subprocess", "typing", "glob", "torch"], "modelscope.pipelines.cv.face_recognition_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.controllable_image_generation_pipeline": ["numpy", "tempfile", "math", "os", "cv2", "subprocess", "typing", "glob", "torch"], "modelscope.pipelines.cv.video_depth_estimation_pipeline": ["typing"], "modelscope.pipelines.cv.text_to_360panorama_image_pipeline": ["PIL", "numpy", "basicsr", "random", "typing", "torch", "realesrgan", "diffusers"], "modelscope.pipelines.cv.ddcolor_image_colorization_pipeline": ["numpy", "cv2", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.license_plate_detection_pipeline": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch"], "modelscope.pipelines.cv.image_human_parsing_pipeline": ["numpy", "torch", "typing", "torchvision"], "modelscope.pipelines.cv.crowd_counting_pipeline": ["PIL", "numpy", "math", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.image_reid_person_pipeline": ["PIL", "math", "os", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.image_driving_perception_pipeline": ["numpy", "typing", "cv2", "os"], "modelscope.pipelines.cv.nerf_recon_vq_compression_pipeline": ["typing"], "modelscope.pipelines.cv.mobile_image_super_resolution_pipeline": ["numpy", "skimage", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.image_skychange_pipeline": ["PIL", "numpy", "pdb", "cv2", "time", "typing"], "modelscope.pipelines.cv.cmdssl_video_embedding_pipeline": ["PIL", "numpy", "os", "typing", "torch", "decord", "torchvision"], "modelscope.pipelines.cv.video_frame_interpolation_pipeline": ["numpy", "tempfile", "math", "os", "cv2", "subprocess", "typing", "glob", "torch", "torchvision"], "modelscope.pipelines.cv.image_detection_pipeline": ["numpy", "typing"], "modelscope.pipelines.cv.face_emotion_pipeline": ["numpy", "typing"], "modelscope.pipelines.cv.tbs_detection_pipeline": ["PIL", "numpy", "cv2", "os", "colorsys", "typing", "torch"], "modelscope.pipelines.cv.image_matching_pipeline": ["PIL", "numpy", "cv2", "typing", "torch"], "modelscope.pipelines.cv.product_segmentation_pipeline": ["numpy", "typing"], "modelscope.pipelines.cv.shop_segmentation_pipleline": ["typing"], "modelscope.pipelines.cv.human_reconstruction_pipeline": ["numpy", "os", "shutil", "typing", "torch", "trimesh"], "modelscope.pipelines.cv.video_multi_object_tracking_pipeline": ["typing", "torch", "os"], "modelscope.pipelines.cv.tinynas_classification_pipeline": ["math", "os", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.video_inpainting_pipeline": ["typing"], "modelscope.pipelines.cv.image_cartoon_pipeline": ["numpy", "cv2", "os", "tensorflow", "typing"], "modelscope.pipelines.cv.fast_instance_segmentation_pipeline": ["numpy", "torch", "typing", "torchvision"], 
"modelscope.pipelines.cv.movie_scene_segmentation_pipeline": ["typing", "torch"], "modelscope.pipelines.cv.language_guided_video_summarization_pipeline": ["PIL", "numpy", "tempfile", "cv2", "os", "shutil", "clip", "random", "typing", "torch"], "modelscope.pipelines.cv.image_denoise_pipeline": ["typing", "torch", "torchvision"], "modelscope.pipelines.cv.face_quality_assessment_pipeline": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.text_driven_segmentation_pipleline": ["typing"], "modelscope.pipelines.cv.face_processing_base_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.vidt_pipeline": ["typing", "torch", "torchvision"], "modelscope.pipelines.cv.image_to_image_generate_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.nerf_recon_4k_pipeline": ["typing"], "modelscope.pipelines.cv.image_inpainting_sdv2_pipeline": ["numpy", "tempfile", "cv2", "math", "os", "sys", "typing", "torch", "diffusers"], "modelscope.pipelines.cv.image_super_resolution_pipeline": ["PIL", "numpy", "cv2", "typing", "torch"], "modelscope.pipelines.cv.mog_face_detection_pipeline": ["numpy", "typing", "os"], "modelscope.pipelines.cv.image_quality_assessment_mos_pipeline": ["numpy", "tempfile", "cv2", "math", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.image_body_reshaping_pipeline": ["typing"], "modelscope.pipelines.cv.retina_face_detection_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.image_quality_assessment_degradation_pipeline": ["numpy", "tempfile", "cv2", "math", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.nerf_recon_acc_pipeline": ["typing"], "modelscope.pipelines.cv.panorama_depth_estimation_pipeline": ["PIL", "numpy", "cv2", "typing", "torch"], "modelscope.pipelines.cv.vision_middleware_pipeline": ["numpy", "math", "os", "typing", "torch", "mmcv", "torchvision"], "modelscope.pipelines.cv.face_liveness_ir_pipeline": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.lineless_table_recognition_pipeline": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch"], "modelscope.pipelines.cv.facial_expression_recognition_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.skin_retouching_pipeline": ["PIL", "numpy", "cv2", "os", "tensorflow", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.ddpm_semantic_segmentation_pipeline": ["typing", "torch", "torchvision"], "modelscope.pipelines.cv.image_colorization_pipeline": ["PIL", "numpy", "cv2", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.image_open_vocabulary_detection_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.image_depth_estimation_pipeline": ["PIL", "numpy", "cv2", "typing", "torch"], "modelscope.pipelines.cv.indoor_layout_estimation_pipeline": ["numpy", "cv2", "typing"], "modelscope.pipelines.cv.image_try_on_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.cv.video_super_resolution_pipeline": ["numpy", "tempfile", "cv2", "math", "os", "subprocess", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.video_instance_segmentation_pipeline": ["tqdm", "numpy", "cv2", "os", "typing", "torch", "mmcv"], "modelscope.pipelines.cv.card_detection_pipeline": ["typing"], "modelscope.pipelines.cv.image_matting_pipeline": ["numpy", "cv2", "os", "tensorflow", "typing"], 
"modelscope.pipelines.cv.referring_video_object_segmentation_pipeline": ["moviepy", "numpy", "PIL", "tqdm", "tempfile", "einops", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.face_recognition_onnx_ir_pipeline": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.general_recognition_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.motion_generation_pipeline": ["numpy", "tempfile", "os", "typing", "torch"], "modelscope.pipelines.cv.image_color_enhance_pipeline": ["typing", "torch", "torchvision"], "modelscope.pipelines.cv.object_detection_3d_pipeline": ["PIL", "numpy", "tempfile", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.video_single_object_tracking_pipeline": ["typing", "cv2", "os"], "modelscope.pipelines.cv.ulfd_face_detection_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.pedestrian_attribute_recognition_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch", "json", "torchvision"], "modelscope.pipelines.cv.face_reconstruction_pipeline": ["face_alignment", "PIL", "numpy", "cv2", "os", "shutil", "tensorflow", "typing", "torch", "io", "scipy"], "modelscope.pipelines.cv.image_style_transfer_pipeline": ["numpy", "typing", "cv2", "os"], "modelscope.pipelines.cv.ocr_recognition_pipeline": [], "modelscope.pipelines.cv.image_semantic_segmentation_pipeline": ["PIL", "numpy", "cv2", "typing", "torch"], "modelscope.pipelines.cv.image_structured_model_probing_pipeline": ["numpy", "math", "os", "typing", "torch", "mmcv", "torchvision"], "modelscope.pipelines.cv.image_instance_segmentation_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.image_panoptic_segmentation_pipeline": ["PIL", "numpy", "cv2", "typing", "torch"], "modelscope.pipelines.cv.face_human_hand_detection_pipeline": ["numpy", "typing"], "modelscope.pipelines.cv.video_summarization_pipeline": ["tqdm", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.image_quality_assessment_man_pipeline": ["numpy", "tempfile", "cv2", "math", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.body_3d_keypoints_pipeline": ["numpy", "tempfile", "cv2", "os", "mpl_toolkits", "typing", "matplotlib", "torch", "datetime"], "modelscope.pipelines.cv.face_recognition_ood_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.video_deinterlace_pipeline": ["numpy", "tempfile", "cv2", "math", "os", "subprocess", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.virtual_try_on_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.ocr_detection_pipeline": ["numpy", "math", "cv2", "os", "tensorflow", "typing", "tf_slim", "torch"], "modelscope.pipelines.cv.face_recognition_onnx_fm_pipeline": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.ocr_utils.model_convnext_transformer": ["torch"], "modelscope.pipelines.cv.ocr_utils.ops": ["numpy", "absl", "math", "os", "cv2", "shutil", "tensorflow", "sys", "uuid"], "modelscope.pipelines.cv.ocr_utils.ocr_modules.vitstr": ["logging", "__future__", "copy", "functools", "torch"], "modelscope.pipelines.cv.ocr_utils.ocr_modules.timm_tinyc": ["logging", "copy", "torch", "math", "itertools", "collections", "functools"], "modelscope.pipelines.cv.ocr_utils.ocr_modules.convnext": ["torch"], "modelscope.pipelines.cv.ocr_utils.resnet_utils": ["collections", "tf_slim", "tensorflow"], 
"modelscope.pipelines.cv.ocr_utils.utils": ["shapely", "numpy", "pyclipper", "cv2"], "modelscope.pipelines.cv.ocr_utils.table_process": ["numpy", "copy", "torch", "cv2", "math", "random"], "modelscope.pipelines.cv.ocr_utils.model_dla34": ["numpy", "torch", "math", "os"], "modelscope.pipelines.cv.ocr_utils.model_vlpt": ["sys", "torch", "math", "os"], "modelscope.pipelines.cv.ocr_utils.model_resnet_mutex_v4_linewithchar": ["tf_slim", "tensorflow"], "modelscope.pipelines.cv.ocr_utils.model_resnet18_half": ["torch", "os"], "modelscope.pipelines.cv.ocr_utils.resnet18_v1": ["tf_slim", "tensorflow"], "modelscope.pipelines.cv.animal_recognition_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.image_inpainting_pipeline": ["PIL", "numpy", "cv2", "typing", "torch"], "modelscope.pipelines.cv.image_salient_detection_pipeline": ["typing"], "modelscope.pipelines.cv.bad_image_detecting_pipeline": ["numpy", "torch", "typing"], "modelscope.pipelines.cv.product_retrieval_embedding_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.video_category_pipeline": ["PIL", "numpy", "os", "typing", "torch", "json", "decord", "torchvision"], "modelscope.pipelines.cv.face_detection_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.face_liveness_xc_pipeline": ["onnxruntime", "numpy", "PIL", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.panorama_depth_estimation_s2net_pipeline": ["PIL", "numpy", "cv2", "typing", "torch"], "modelscope.pipelines.cv.mtcnn_face_detection_pipeline": ["typing", "torch", "os"], "modelscope.pipelines.cv.video_object_segmentation_pipeline": ["PIL", "numpy", "os", "typing", "torch", "torchvision"], "modelscope.pipelines.cv.facial_landmark_confidence_pipeline": ["PIL", "numpy", "cv2", "os", "typing", "torch"], "modelscope.pipelines.cv.image_restoration_pipeline": ["typing"], "modelscope.pipelines.cv.image_debanding_pipeline": ["typing", "torch", "torchvision"], "modelscope.pipelines.cv.live_category_pipeline": ["PIL", "numpy", "os", "typing", "torch", "decord", "torchvision"], "modelscope.pipelines.cv.hand_static_pipeline": ["numpy", "typing"], "modelscope.pipelines.cv.tinynas_detection_pipeline": ["typing"], "modelscope.pipelines.builder": ["typing", "os"], "modelscope.pipelines.science.protein_structure_pipeline": ["numpy", "time", "os", "typing", "unicore", "torch", "json"], "modelscope.pipelines.pipeline_template": ["numpy", "typing"], "modelscope.pipelines.util": ["typing", "os"], "modelscope.pipelines.base": ["numpy", "multiprocessing", "os", "functools", "packaging", "abc", "threading", "random", "typing", "torch"], "modelscope.preprocessors.nlp.relation_extraction_preprocessor": ["typing", "transformers"], "modelscope.preprocessors.nlp.space.batch": [], "modelscope.preprocessors.nlp.space.dialog_state_tracking_preprocessor": ["typing"], "modelscope.preprocessors.nlp.space.tensorlistdataset": ["torch"], "modelscope.preprocessors.nlp.space.args": ["argparse", "json"], "modelscope.preprocessors.nlp.space.tokenizer": ["logging", "__future__", "regex", "os", "functools", "unicodedata", "sys", "collections", "json"], "modelscope.preprocessors.nlp.space.lazy_dataset": ["json"], "modelscope.preprocessors.nlp.space.dialog_intent_prediction_preprocessor": ["typing", "json", "os"], "modelscope.preprocessors.nlp.space.data_loader": ["numpy", "math", "os"], "modelscope.preprocessors.nlp.space.dst_processors": ["logging", "tqdm", "numpy", "six", "json", 
"re"], "modelscope.preprocessors.nlp.space.sampler": ["numpy"], "modelscope.preprocessors.nlp.space.dialog_modeling_preprocessor": ["typing", "os"], "modelscope.preprocessors.nlp.space.preprocess": ["glob", "os"], "modelscope.preprocessors.nlp.space.fields.intent_field": ["tqdm", "numpy", "multiprocessing", "os", "time", "itertools", "re", "random", "glob", "collections", "json"], "modelscope.preprocessors.nlp.space.fields.gen_field": ["numpy", "itertools", "os", "random", "collections", "json", "asyncio"], "modelscope.preprocessors.nlp.transformers_tokenizer": ["collections", "transformers", "json", "os"], "modelscope.preprocessors.nlp.space_T_en.conversational_text_to_sql_preprocessor": ["os", "text2sql_lgesql", "typing", "torch", "json"], "modelscope.preprocessors.nlp.space_T_en.fields.parse": [], "modelscope.preprocessors.nlp.space_T_en.fields.preprocess_dataset": ["text2sql_lgesql"], "modelscope.preprocessors.nlp.space_T_en.fields.process_dataset": ["text2sql_lgesql", "sys", "pickle", "os"], "modelscope.preprocessors.nlp.space_T_en.fields.common_utils": ["numpy", "nltk", "sqlite3", "itertools", "os", "text2sql_lgesql"], "modelscope.preprocessors.nlp.text_classification_preprocessor": ["numpy", "typing"], "modelscope.preprocessors.nlp.document_grounded_dialog_generate_preprocessor": ["typing", "transformers", "torch", "os"], "modelscope.preprocessors.nlp.token_classification_viet_preprocessor": ["typing", "torch"], "modelscope.preprocessors.nlp.word_alignment_preprocessor": ["numpy", "itertools", "os", "typing", "torch"], "modelscope.preprocessors.nlp.siamese_uie_preprocessor": ["typing", "transformers"], "modelscope.preprocessors.nlp.mgeo_ranking_preprocessor": ["typing", "transformers", "torch"], "modelscope.preprocessors.nlp.canmt_translation": ["os", "typing", "subword_nmt", "torch", "jieba", "sacremoses"], "modelscope.preprocessors.nlp.sentence_embedding_preprocessor": ["typing"], "modelscope.preprocessors.nlp.zero_shot_classification_preprocessor": ["typing"], "modelscope.preprocessors.nlp.translation_evaluation_preprocessor": ["typing", "transformers", "torch"], "modelscope.preprocessors.nlp.dialog_classification_use_preprocessor": ["typing", "transformers", "torch"], "modelscope.preprocessors.nlp.token_classification_thai_preprocessor": ["typing"], "modelscope.preprocessors.nlp.document_grounded_dialog_rerank_preprocessor": ["copy", "os", "typing", "transformers", "torch"], "modelscope.preprocessors.nlp.utils": ["numpy", "os", "transformers", "typing", "collections", "json"], "modelscope.preprocessors.nlp.space_T_cn.table_question_answering_preprocessor": ["typing", "transformers", "torch", "os"], "modelscope.preprocessors.nlp.space_T_cn.fields.database": ["tqdm", "json", "sqlite3"], "modelscope.preprocessors.nlp.space_T_cn.fields.schema_link": ["re"], "modelscope.preprocessors.nlp.space_T_cn.fields.struct": [], "modelscope.preprocessors.nlp.bert_seq_cls_tokenizer": ["typing", "transformers"], "modelscope.preprocessors.nlp.document_grounded_dialog_retrieval_preprocessor": ["typing", "transformers", "torch", "os"], "modelscope.preprocessors.nlp.fill_mask_preprocessor": ["numpy", "os", "abc", "re", "typing", "torch"], "modelscope.preprocessors.nlp.machine_reading_comprehension_preprocessor": ["transformers", "torch", "os"], "modelscope.preprocessors.nlp.text_error_correction": ["typing", "transformers", "torch", "os"], "modelscope.preprocessors.nlp.token_classification_preprocessor": ["numpy", "torch", "typing"], "modelscope.preprocessors.nlp.faq_question_answering_preprocessor": 
["typing", "torch"], "modelscope.preprocessors.nlp.feature_extraction_preprocessor": ["numpy", "typing"], "modelscope.preprocessors.nlp.text_ranking_preprocessor": ["typing", "transformers"], "modelscope.preprocessors.nlp.text_clean": ["codecs", "sys", "re"], "modelscope.preprocessors.nlp.text_generation_preprocessor": ["numpy", "torch", "typing", "os"], "modelscope.preprocessors.nlp.mglm_summarization_preprocessor": ["typing", "re", "os"], "modelscope.preprocessors.nlp.document_segmentation_preprocessor": ["typing"], "modelscope.preprocessors.asr": ["typing", "os"], "modelscope.preprocessors.image": ["PIL", "numpy", "cv2", "typing", "io"], "modelscope.preprocessors.audio": ["numpy", "os", "typing", "torch", "io", "scipy"], "modelscope.preprocessors.movie_scene_segmentation.transforms": ["PIL", "numpy", "os", "random", "typing", "torch", "numbers", "torchvision"], "modelscope.preprocessors.kws": ["typing", "yaml", "os"], "modelscope.preprocessors.multi_modal": ["PIL", "numpy", "timm", "os", "re", "typing", "torch", "io", "json", "decord", "torchvision"], "modelscope.preprocessors.cv.bad_image_detecting_preprocessor": ["PIL", "numpy", "math", "typing", "torch", "torchvision"], "modelscope.preprocessors.cv.video_stabilization": ["numpy", "torch", "cv2"], "modelscope.preprocessors.cv.image_quality_assessment_man": ["PIL", "numpy", "math", "typing", "torch", "torchvision"], "modelscope.preprocessors.cv.cv2_transforms": ["numpy", "torch", "cv2", "math", "numbers", "collections", "random"], "modelscope.preprocessors.cv.timer": ["time"], "modelscope.preprocessors.cv.mmcls_preprocessor": ["numpy", "typing", "os"], "modelscope.preprocessors.cv.video_super_resolution": ["collections", "cv2", "os"], "modelscope.preprocessors.cv.image_restoration_preprocessor": ["PIL", "numpy", "math", "typing", "torch", "torchvision"], "modelscope.preprocessors.cv.image_quality_assessment_mos": ["numpy", "math", "cv2", "typing", "torchvision"], "modelscope.preprocessors.cv.controllable_image_generation": ["PIL", "numpy", "cv2", "math", "os", "typing", "torch", "torchvision"], "modelscope.preprocessors.cv.image_classification_preprocessor": ["PIL", "numpy", "cv2", "os", "typing", "torch", "torchvision"], "modelscope.preprocessors.cv.action_detection_mapper": ["detectron2", "numpy", "copy", "torch", "scipy", "decord", "random"], "modelscope.preprocessors.cv.util": ["shutil", "sys", "collections", "os"], "modelscope.preprocessors.builder": [], "modelscope.preprocessors.tts": ["typing", "kantts", "os"], "modelscope.preprocessors.speaker": ["typing", "torch"], "modelscope.preprocessors.video": ["numpy", "tempfile", "math", "os", "random", "uuid", "torch", "urllib", "decord", "torchvision"], "modelscope.preprocessors.ofa.image_classification": ["PIL", "timm", "functools", "typing", "torch", "torchvision"], "modelscope.preprocessors.ofa.asr": ["soundfile", "os", "fairseq", "random", "typing", "pathlib", "torch", "librosa"], "modelscope.preprocessors.ofa.text_classification": ["typing", "torch"], "modelscope.preprocessors.ofa.ocr_recognition": ["unicodedata2", "typing", "torch", "zhconv", "torchvision"], "modelscope.preprocessors.ofa.text_to_image_synthesis": ["typing", "torch"], "modelscope.preprocessors.ofa.utils.get_tables": ["sys", "traceback", "sqlite3"], "modelscope.preprocessors.ofa.utils.vision_helper": ["numpy", "cv2"], "modelscope.preprocessors.ofa.utils.bridge_content_encoder": ["typing", "difflib", "sqlite3", "functools", "rapidfuzz"], "modelscope.preprocessors.ofa.utils.random_help": ["torch", "torch_xla"], 
"modelscope.preprocessors.ofa.utils.text2phone": [], "modelscope.preprocessors.ofa.utils.audio_helper": ["numpy", "torch", "typing"], "modelscope.preprocessors.ofa.utils.transforms": ["PIL", "numpy", "torch", "random", "torchvision"], "modelscope.preprocessors.ofa.utils.collate": ["numpy", "torch", "typing"], "modelscope.preprocessors.ofa.utils.constant": [], "modelscope.preprocessors.ofa.summarization": ["typing", "torch"], "modelscope.preprocessors.ofa.sudoku": ["numpy", "torch", "typing"], "modelscope.preprocessors.ofa.visual_question_answering": ["PIL", "typing", "torch", "torchvision"], "modelscope.preprocessors.ofa.text2sql": ["os", "re", "random", "typing", "torch"], "modelscope.preprocessors.ofa.visual_entailment": ["PIL", "typing", "torch", "torchvision"], "modelscope.preprocessors.ofa.image_captioning": ["typing", "torch", "torchvision"], "modelscope.preprocessors.ofa.base": ["PIL", "numpy", "string", "os", "torchaudio", "re", "torch", "io", "json"], "modelscope.preprocessors.ofa.visual_grounding": ["PIL", "numpy", "typing", "torch", "torchvision"], "modelscope.preprocessors.common": ["numpy", "time", "typing", "torch", "collections"], "modelscope.preprocessors.science.uni_fold": ["logging", "os", "tarfile", "requests", "re", "random", "unittest", "pathlib", "pickle", "json", "gzip", "tqdm", "numpy", "time", "ipdb", "hashlib", "typing", "torch"], "modelscope.preprocessors.base": ["typing", "abc", "os"], "modelscope.trainers.nlp.faq_question_answering_trainer": ["distutils", "numpy", "functools", "contextlib", "dataclasses", "typing", "torch", "collections"], "modelscope.trainers.nlp.space.metrics.metrics_tracker": ["collections", "math"], "modelscope.trainers.nlp.space.dialog_modeling_trainer": ["numpy", "os", "typing", "time"], "modelscope.trainers.nlp.space.trainer.intent_trainer": ["tqdm", "numpy", "time", "os", "transformers", "torch", "collections", "json"], "modelscope.trainers.nlp.space.trainer.gen_trainer": ["tqdm", "numpy", "time", "os", "transformers", "torch", "collections", "json"], "modelscope.trainers.nlp.space.eval": ["numpy", "math", "nltk", "sklearn", "collections", "json"], "modelscope.trainers.nlp.space.dialog_intent_trainer": ["numpy", "typing", "os"], "modelscope.trainers.nlp.csanmt_translation_trainer": ["typing", "os", "tensorflow", "time"], "modelscope.trainers.nlp.document_grounded_dialog_rerank_trainer": ["numpy", "time", "os", "random", "typing", "transformers", "torch"], "modelscope.trainers.nlp.text_ranking_trainer": ["tqdm", "numpy", "time", "dataclasses", "typing", "torch"], "modelscope.trainers.nlp.document_grounded_dialog_retrieval_trainer": ["tqdm", "numpy", "os", "transformers", "torch", "json", "faiss"], "modelscope.trainers.nlp.sequence_classification_trainer": ["numpy", "typing", "time"], "modelscope.trainers.nlp.table_question_answering_trainer": ["tqdm", "numpy", "time", "os", "typing", "torch", "json"], "modelscope.trainers.nlp.plug_trainer": ["os", "deepspeed", "typing", "torch", "megatron_util"], "modelscope.trainers.nlp.text_generation_trainer": ["typing", "torch"], "modelscope.trainers.nlp.siamese_uie_trainer": ["numpy", "math", "os", "time", "random", "typing", "torch", "collections", "json"], "modelscope.trainers.nlp.gpt_moe_trainer": ["os", "typing", "torch", "megatron_util", "collections"], "modelscope.trainers.nlp.document_grounded_dialog_generate_trainer": ["tqdm", "string", "os", "re", "rouge", "transformers", "torch", "sacrebleu", "collections", "json"], "modelscope.trainers.nlp.translation_evaluation_trainer": ["tqdm", "math", 
"os", "random", "typing", "transformers", "torch", "pandas"], "modelscope.trainers.nlp.sentence_embedding_trainer": ["tqdm", "numpy", "time", "dataclasses", "typing", "transformers", "torch"], "modelscope.trainers.nlp.gpt3_trainer": ["typing", "copy", "torch", "os"], "modelscope.trainers.multi_modal.mplug.mplug_trainer": ["collections", "typing", "torch"], "modelscope.trainers.multi_modal.lora_diffusion.lora_diffusion_trainer": ["typing", "torch", "diffusers"], "modelscope.trainers.multi_modal.custom_diffusion.custom_diffusion_trainer": ["diffusers", "numpy", "PIL", "tqdm", "itertools", "os", "random", "hashlib", "typing", "pathlib", "torch", "warnings", "json", "torchvision"], "modelscope.trainers.multi_modal.stable_diffusion.stable_diffusion_trainer": ["typing", "torch"], "modelscope.trainers.multi_modal.mgeo_ranking_trainer": ["dataclasses", "typing", "torch"], "modelscope.trainers.multi_modal.dreambooth_diffusion.dreambooth_diffusion_trainer": ["diffusers", "PIL", "tqdm", "itertools", "shutil", "hashlib", "typing", "pathlib", "torch", "warnings", "collections", "torchvision"], "modelscope.trainers.multi_modal.ofa.ofa_trainer": ["tempfile", "math", "os", "shutil", "functools", "typing", "torch", "json"], "modelscope.trainers.multi_modal.ofa.ofa_trainer_utils": ["numpy", "transformers", "torch", "math", "os", "shutil"], "modelscope.trainers.multi_modal.team.team_trainer_utils": ["PIL", "torch", "torchvision"], "modelscope.trainers.multi_modal.team.team_trainer": ["numpy", "os", "sklearn", "typing", "torch", "collections"], "modelscope.trainers.multi_modal.clip.clip_trainer_utils": ["math", "os", "functools", "torch", "inspect"], "modelscope.trainers.multi_modal.clip.clip_trainer": ["typing", "torch", "math", "os"], "modelscope.trainers.multi_modal.efficient_diffusion_tuning.efficient_diffusion_tuning_trainer": ["typing", "torch"], "modelscope.trainers.optimizer.builder": ["typing", "torch", "inspect"], "modelscope.trainers.default_config": ["typing"], "modelscope.trainers.audio.tts_trainer": ["tempfile", "zipfile", "os", "shutil", "typing", "json"], "modelscope.trainers.audio.separation_trainer": ["tqdm", "numpy", "os", "torchaudio", "typing", "torch", "csv", "speechbrain"], "modelscope.trainers.audio.asr_trainer": ["tempfile", "os", "shutil", "typing", "json", "funasr"], "modelscope.trainers.audio.kws_farfield_trainer": ["numpy", "math", "os", "typing", "glob", "pickle", "torch", "datetime"], "modelscope.trainers.audio.ans_trainer": [], "modelscope.trainers.audio.kws_utils.file_utils": ["re"], "modelscope.trainers.audio.kws_utils.batch_utils": ["numpy", "math", "os", "sys", "typing", "torch", "collections", "datetime"], "modelscope.trainers.audio.kws_utils.model_utils": ["numpy", "os", "shutil", "re", "glob", "torch", "yaml"], "modelscope.trainers.audio.kws_utils.runtime_utils": ["os", "shutil", "stat", "re", "sys", "collections", "codecs", "json"], "modelscope.trainers.audio.kws_utils.det_utils": ["numpy", "os", "kaldiio", "threading", "matplotlib", "glob", "torch", "json"], "modelscope.trainers.audio.kws_nearfield_trainer": ["tensorboardX", "copy", "os", "re", "typing", "yaml", "torch", "datetime"], "modelscope.trainers.utils.log_buffer": ["collections", "numpy"], "modelscope.trainers.utils.inference": ["logging", "tqdm", "os", "shutil", "torch", "pickle", "collections"], "modelscope.trainers.cv.card_detection_scrfd_trainer": [], "modelscope.trainers.cv.cartoon_translation_trainer": ["tqdm", "numpy", "os", "packaging", "tensorflow", "typing"], 
"modelscope.trainers.cv.image_portrait_enhancement_trainer": ["collections", "torch"], "modelscope.trainers.cv.referring_video_object_segmentation_trainer": ["torch", "os"], "modelscope.trainers.cv.image_detection_damoyolo_trainer": ["math", "time", "os", "typing", "torch", "easydict", "datetime"], "modelscope.trainers.cv.face_detection_scrfd_trainer": ["typing", "copy", "os", "time"], "modelscope.trainers.cv.image_classifition_trainer": ["numpy", "copy", "os", "time", "typing", "torch"], "modelscope.trainers.cv.ocr_detection_db_trainer": ["tqdm", "numpy", "copy", "math", "time", "os", "typing", "torch", "easydict", "datetime"], "modelscope.trainers.cv.image_instance_segmentation_trainer": [], "modelscope.trainers.cv.action_detection_trainer": ["os", "fvcore", "detectron2", "typing", "torch"], "modelscope.trainers.cv.nerf_recon_acc_trainer": ["tqdm", "numpy", "cv2", "time", "os", "random", "typing", "glob", "torch", "datetime"], "modelscope.trainers.cv.movie_scene_segmentation_trainer": [], "modelscope.trainers.cv.image_inpainting_trainer": ["collections", "torch", "time"], "modelscope.trainers.cv.image_defrcn_fewshot_detection_trainer": ["os", "detectron2", "typing", "torch", "collections"], "modelscope.trainers.cv.vision_efficient_tuning_trainer": ["typing", "torch"], "modelscope.trainers.cv.ocr_recognition_trainer": ["collections", "torch", "time"], "modelscope.trainers.nlp_trainer": ["numpy", "torch", "typing", "os"], "modelscope.trainers.builder": [], "modelscope.trainers.cli_argument_parser": ["argparse", "dataclasses", "typing"], "modelscope.trainers.lrscheduler.builder": ["packaging", "torch", "inspect"], "modelscope.trainers.lrscheduler.warmup.warmup": [], "modelscope.trainers.lrscheduler.warmup.base": ["torch"], "modelscope.trainers.hooks.hook": ["functools"], "modelscope.trainers.hooks.optimizer.apex_optimizer_hook": ["logging", "packaging", "torch"], "modelscope.trainers.hooks.optimizer.torch_optimizer_hook": ["logging"], "modelscope.trainers.hooks.optimizer.base": ["logging", "torch"], "modelscope.trainers.hooks.lr_scheduler_hook": [], "modelscope.trainers.hooks.checkpoint.checkpoint_hook": ["numpy", "os", "shutil", "random", "typing", "torch", "json"], "modelscope.trainers.hooks.checkpoint.checkpoint_processor": ["shutil", "re", "os"], "modelscope.trainers.hooks.checkpoint.load_checkpoint_hook": ["numpy", "packaging", "random", "typing", "torch"], "modelscope.trainers.hooks.builder": [], "modelscope.trainers.hooks.evaluation_hook": ["collections", "typing"], "modelscope.trainers.hooks.early_stop_hook": ["numpy"], "modelscope.trainers.hooks.iter_timer_hook": ["time"], "modelscope.trainers.hooks.priority": ["typing", "enum"], "modelscope.trainers.hooks.clip_clamp_logit_scale_hook": ["torch"], "modelscope.trainers.hooks.logger.text_logger_hook": ["os", "torch", "collections", "json", "datetime"], "modelscope.trainers.hooks.logger.tensorboard_hook": ["numpy", "torch", "os"], "modelscope.trainers.hooks.logger.base": ["numbers", "numpy", "abc", "torch"], "modelscope.trainers.hooks.distributed.deepspeed_hook": ["math", "os", "shutil", "functools", "deepspeed", "transformers", "torch", "megatron_util"], "modelscope.trainers.hooks.distributed.megatron_hook": ["shutil", "torch", "megatron_util", "os"], "modelscope.trainers.hooks.distributed.ddp_hook": [], "modelscope.trainers.hooks.compression.sparsity_hook": ["os"], "modelscope.trainers.hooks.compression.utils": ["torch"], "modelscope.trainers.training_args": ["copy", "re", "addict", "dataclasses", "typing", "json"], 
"modelscope.trainers.parallel.builder": ["torch"], "modelscope.trainers.parallel.utils": [], "modelscope.trainers.base": ["abc", "os", "typing", "time"], "modelscope.trainers.trainer": ["distutils", "copy", "os", "functools", "typing", "torch", "collections", "json", "inspect"], "modelscope.msdatasets.auth.auth_config": ["typing", "http"], "modelscope.msdatasets.download.download_config": ["typing", "datasets"], "modelscope.msdatasets.download.dataset_builder": ["os", "datasets", "pyarrow", "typing", "pandas"], "modelscope.msdatasets.download.download_manager": ["datasets"], "modelscope.msdatasets.audio.asr_dataset": [], "modelscope.msdatasets.dataset_cls.custom_datasets.image_quality_assessment_degradation.image_quality_assessment_degradation_dataset": ["torchvision"], "modelscope.msdatasets.dataset_cls.custom_datasets.mgeo_ranking_dataset": ["typing", "json", "torch", "random"], "modelscope.msdatasets.dataset_cls.custom_datasets.image_quality_assmessment_mos.image_quality_assessment_mos_dataset": [], "modelscope.msdatasets.dataset_cls.custom_datasets.video_super_resolution.video_super_resolution_dataset": ["collections", "numpy", "torch", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.easycv_base": ["os"], "modelscope.msdatasets.dataset_cls.custom_datasets.image_instance_segmentation_coco_dataset": ["numpy", "pycocotools", "os"], "modelscope.msdatasets.dataset_cls.custom_datasets.movie_scene_segmentation.movie_scene_segmentation_dataset": ["copy", "os", "random", "torch", "json", "torchvision"], "modelscope.msdatasets.dataset_cls.custom_datasets.movie_scene_segmentation.sampler": ["numpy", "random"], "modelscope.msdatasets.dataset_cls.custom_datasets.image_inpainting.aug": ["albumentations", "imgaug"], "modelscope.msdatasets.dataset_cls.custom_datasets.image_inpainting.image_inpainting_dataset": ["numpy", "cv2", "os", "albumentations", "glob", "enum"], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.build": ["bisect", "copy", "torch", "math"], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.transforms.build": [], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.transforms.transforms": ["numpy", "torch", "cv2", "random", "torchvision"], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.datasets.mosaic_wrapper": ["numpy", "math", "cv2", "random", "torch"], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.datasets.coco": ["numpy", "torch", "cv2", "torchvision"], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.evaluation.coco.coco_eval": ["collections", "torch", "tempfile", "os"], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.samplers.grouped_batch_sampler": ["torch", "itertools"], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.samplers.distributed": ["torch", "math"], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.samplers.iteration_based_batch_sampler": ["torch"], "modelscope.msdatasets.dataset_cls.custom_datasets.damoyolo.collate_batch": [], "modelscope.msdatasets.dataset_cls.custom_datasets.audio.kws_nearfield_processor": ["numpy", "kaldiio", "torchaudio", "random", "torch", "json"], "modelscope.msdatasets.dataset_cls.custom_datasets.audio.asr_dataset": ["os"], "modelscope.msdatasets.dataset_cls.custom_datasets.audio.kws_farfield_dataset": ["numpy", "math", "os", "threading", "torch", "queue"], "modelscope.msdatasets.dataset_cls.custom_datasets.audio.kws_nearfield_dataset": ["torch", "random"], 
"modelscope.msdatasets.dataset_cls.custom_datasets.referring_video_object_segmentation.transformers": ["PIL", "torch", "random", "torchvision"], "modelscope.msdatasets.dataset_cls.custom_datasets.referring_video_object_segmentation.referring_video_object_segmentation_dataset": ["tqdm", "numpy", "pycocotools", "os", "h5py", "glob", "torch", "pandas", "json", "torchvision"], "modelscope.msdatasets.dataset_cls.custom_datasets.language_guided_video_summarization_dataset": ["numpy", "os", "h5py", "torch", "json"], "modelscope.msdatasets.dataset_cls.custom_datasets.bad_image_detecting.bad_image_detecting_dataset": [], "modelscope.msdatasets.dataset_cls.custom_datasets.builder": [], "modelscope.msdatasets.dataset_cls.custom_datasets.reds_image_deblurring_dataset": ["numpy", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.image_colorization.image_colorization_dataset": ["numpy", "torch", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.sidd_image_denoising.sidd_image_denoising_dataset": ["numpy", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.sidd_image_denoising.transforms": ["random"], "modelscope.msdatasets.dataset_cls.custom_datasets.sidd_image_denoising.data_utils": ["torch", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.torch_custom_dataset": ["typing", "torch"], "modelscope.msdatasets.dataset_cls.custom_datasets.video_frame_interpolation.video_frame_interpolation_dataset": ["numpy", "torch", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.video_frame_interpolation.data_utils": ["torch", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.video_summarization_dataset": ["numpy", "os", "h5py", "torch", "json"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_recognition_dataset": ["PIL", "numpy", "cv2", "os", "six", "lmdb", "torch", "json"], "modelscope.msdatasets.dataset_cls.custom_datasets.video_stabilization.video_stabilization_dataset": [], "modelscope.msdatasets.dataset_cls.custom_datasets.image_portrait_enhancement.data_utils": ["torch", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.image_portrait_enhancement.image_portrait_enhancement_dataset": ["numpy", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.veco_dataset": ["numpy", "datasets", "typing"], "modelscope.msdatasets.dataset_cls.custom_datasets.text_ranking_dataset": ["typing", "torch", "random"], "modelscope.msdatasets.dataset_cls.custom_datasets.gopro_image_deblurring_dataset": ["numpy", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.processes.normalize_image": ["numpy", "torch"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.processes.random_crop_data": ["numpy", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.processes.make_border_map": ["shapely", "numpy", "pyclipper", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.processes.data_process": [], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.processes.augment_data": ["numpy", "imgaug", "math", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.processes.make_icdar_data": ["collections", "numpy", "torch", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.processes.make_seg_detection_data": ["shapely", "numpy", "pyclipper", "cv2"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.data_loader": ["numpy", "math", "bisect", "torch", "imgaug"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.augmenter": 
["imgaug"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.image_dataset": ["logging", "numpy", "math", "os", "cv2", "bisect", "functools", "glob", "torch"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.measures.iou_evaluator": ["shapely", "collections", "numpy"], "modelscope.msdatasets.dataset_cls.custom_datasets.ocr_detection.measures.quad_measurer": ["numpy"], "modelscope.msdatasets.dataset_cls.dataset": ["tqdm", "copy", "os", "datasets", "pandas"], "modelscope.msdatasets.utils.upload_utils": ["tqdm", "multiprocessing", "os"], "modelscope.msdatasets.utils.dataset_utils": ["collections", "typing", "pandas", "os"], "modelscope.msdatasets.utils.oss_utils": ["__future__", "multiprocessing", "os", "datasets", "oss2"], "modelscope.msdatasets.utils.maxcompute_utils": ["pandas", "math"], "modelscope.msdatasets.utils.delete_utils": [], "modelscope.msdatasets.ms_dataset": ["numpy", "os", "datasets", "typing", "warnings"], "modelscope.msdatasets.task_datasets.torch_base_dataset": [], "modelscope.msdatasets.task_datasets.sidd_image_denoising": [], "modelscope.msdatasets.task_datasets.reds_image_deblurring_dataset": [], "modelscope.msdatasets.task_datasets.video_summarization_dataset": [], "modelscope.msdatasets.task_datasets.gopro_image_deblurring_dataset": [], "modelscope.msdatasets.data_files.data_files_manager": ["typing", "datasets", "os"], "modelscope.msdatasets.data_loader.data_loader_manager": ["abc", "datasets", "enum", "os"], "modelscope.msdatasets.data_loader.data_loader": ["typing", "abc", "datasets", "os"], "modelscope.msdatasets.meta.data_meta_config": [], "modelscope.msdatasets.meta.data_meta_manager": ["os", "shutil", "datasets", "collections", "json"], "modelscope.msdatasets.context.dataset_context_config": ["typing"], "modelscope.exporters.nlp.model_for_token_classification_exporter": ["collections", "typing", "torch"], "modelscope.exporters.nlp.sbert_for_sequence_classification_exporter": ["collections", "typing", "torch"], "modelscope.exporters.nlp.csanmt_for_translation_exporter": ["typing", "tensorflow", "os"], "modelscope.exporters.nlp.sbert_for_zero_shot_classification_exporter": ["collections", "typing"], "modelscope.exporters.torch_model_exporter": ["itertools", "os", "contextlib", "typing", "torch"], "modelscope.exporters.multi_modal.stable_diffusion_exporter": ["onnx", "argparse", "shutil", "os", "packaging", "typing", "pathlib", "torch", "diffusers", "collections"], "modelscope.exporters.tf_model_exporter": ["typing", "tensorflow", "os"], "modelscope.exporters.audio.ans_dfsmn_exporter": ["torch", "os"], "modelscope.exporters.cv.object_detection_damoyolo_exporter": ["numpy", "os", "onnx", "functools", "typing", "torch"], "modelscope.exporters.cv.cartoon_translation_exporter": ["packaging", "typing", "tensorflow", "os"], "modelscope.exporters.cv.face_detection_scrfd_exporter": ["numpy", "os", "onnx", "functools", "typing", "torch"], "modelscope.exporters.builder": [], "modelscope.exporters.base": ["typing", "abc", "os"]}, "version": "1.8.4", "md5": "5995260f50b159a52d2b70bdd17d2ad6", "files_mtime": {"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/ponet/configuration.py": 1693424429.9856606, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/ponet/tokenization.py": 1693424429.9866607, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/ponet/document_segmentation.py": 1693424429.9866607, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/ponet/backbone.py": 1693424429.9856606, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/ponet/fill_mask.py": 1693424429.9866607, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm/text_generation.py": 1693424429.9266567, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm/configuration.py": 1693424429.9256566, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm/quantization.py": 1693424429.9256566, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm/tokenization.py": 1693424429.9266567, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/text_classification.py": 1693424430.0006616, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/configuration.py": 1693424429.9996614, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/adv_utils.py": 1693424429.9986615, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/faq_question_answering.py": 1693424430.0006616, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/token_classification.py": 1693424430.0016618, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/backbone.py": 1693424429.9996614, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/structbert/fill_mask.py": 1693424430.0006616, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space_T_en/text_to_sql.py": 1693424429.9976614, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/text_generation.py": 1693424429.947658, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/configuration.py": 1693424429.9466581, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/moe/experts.py": 1693424429.9486582, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/moe/utils.py": 1693424429.9506583, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/moe/layer.py": 1693424429.9486582, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/moe/mappings.py": 1693424429.9496582, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/moe/sharded_moe.py": 1693424429.9496582, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/distributed_gpt_moe.py": 1693424429.947658, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/checkpointing.py": 1693424429.9466581, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/tokenizer.py": 1693424429.9486582, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_moe/backbone.py": 1693424429.945658, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bart/text_error_correction.py": 1693424429.9176562, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/hf_transformers/backbone.py": 1693424429.9546585, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/dgds/document_grounded_dialog_rerank.py": 1693424429.9366574, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/dgds/document_grounded_dialog_retrieval.py": 1693424429.9366574, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/dgds/backbone.py": 1693424429.9356573, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/dgds/document_grounded_dialog_generate.py": 1693424429.9356573, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/configuration.py": 1693424429.989661, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/dialog_modeling.py": 1693424429.990661, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/dialog_intent_prediction.py": 1693424429.990661, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/model/generator.py": 1693424429.991661, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/model/unified_transformer.py": 1693424429.9936612, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/model/model_base.py": 1693424429.992661, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/model/intent_unified_transformer.py": 1693424429.992661, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/model/tokenization_space.py": 1693424429.9936612, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/model/gen_unified_transformer.py": 1693424429.991661, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/modules/feedforward.py": 1693424429.9946613, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/modules/embedder.py": 1693424429.9946613, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/modules/multihead_attention.py": 1693424429.9956613, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/modules/transformer_block.py": 1693424429.9956613, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/modules/functions.py": 1693424429.9946613, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space/dialog_state_tracking.py": 1693424429.990661, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/text_ranking.py": 1693424429.9216564, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/text_classification.py": 1693424429.9216564, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/configuration.py": 1693424429.9196563, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/sentence_embedding.py": 1693424429.9206564, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/word_alignment.py": 1693424429.9226565, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/document_segmentation.py": 1693424429.9196563, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/siamese_uie.py": 1693424429.9206564, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/token_classification.py": 1693424429.9216564, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/backbone.py": 1693424429.918656, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bert/fill_mask.py": 1693424429.9206564, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/peer/text_classification.py": 1693424429.9796603, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/peer/configuration.py": 1693424429.97866, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/peer/backbone.py": 1693424429.97866, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/peer/sas_utils.py": 1693424429.9796603, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/xlm_roberta/configuration.py": 1693424430.0096622, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/xlm_roberta/backbone.py": 1693424430.0096622, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/canmt/canmt_translation.py": 1693424429.9246566, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/canmt/canmt_model.py": 
1693424429.9236565, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/canmt/sequence_generator.py": 1693424429.9246566, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt3/text_generation.py": 1693424429.944658, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt3/configuration.py": 1693424429.9436579, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt3/distributed_gpt3.py": 1693424429.944658, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt3/tokenizer.py": 1693424429.945658, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt3/backbone.py": 1693424429.9436579, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space_T_cn/table_question_answering.py": 1693424429.9976614, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space_T_cn/configuration.py": 1693424429.9966614, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/space_T_cn/backbone.py": 1693424429.9966614, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/text_ranking.py": 1693424430.0046618, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/text_generation.py": 1693424430.0046618, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/task_model.py": 1693424430.0036619, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/text_classification.py": 1693424430.0036619, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/information_extraction.py": 1693424430.0026617, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/feature_extraction.py": 1693424430.0026617, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/machine_reading_comprehension.py": 1693424430.0026617, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/token_classification.py": 1693424430.0046618, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/task_models/fill_mask.py": 1693424430.0026617, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/file_utils.py": 1693424429.9686594, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/sp_tokenizer.py": 1693424429.9696596, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/extraction.py": 1693424429.9686594, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/corpora.py": 1693424429.9676595, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/lazy_loader.py": 1693424429.9696596, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/tokenization.py": 1693424429.9706597, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/tokenization_gpt2.py": 1693424429.9706597, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/datasets.py": 1693424429.9676595, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/wordpiece.py": 1693424429.9716597, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/data_utils/samplers.py": 1693424429.9696596, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/blocklm_utils.py": 1693424429.9636593, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/test/test_block.py": 1693424429.9756598, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/test/test_rel_shift.py": 1693424429.9756598, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/utils.py": 1693424429.9666593, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/process_grid.py": 1693424429.9656594, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/generation_utils.py": 1693424429.9646592, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/mglm_for_text_summarization.py": 1693424429.9646592, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/configure_data.py": 1693424429.9636593, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/model/modeling_bert.py": 1693424429.9736598, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/model/downstream.py": 1693424429.9726598, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/model/distributed.py": 1693424429.9726598, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/model/prompt.py": 1693424429.9736598, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/model/modeling_glm.py": 1693424429.9736598, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/model/transformer.py": 1693424429.97466, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/run_test.py": 1693424429.9656594, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/arguments.py": 1693424429.9636593, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/mglm/train_utils.py": 1693424429.9656594, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/deberta_v2/configuration.py": 1693424429.9336572, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/deberta_v2/tokenization.py": 1693424429.9346573, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/deberta_v2/tokenization_fast.py": 1693424429.9346573, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/deberta_v2/backbone.py": 1693424429.9336572, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/deberta_v2/fill_mask.py": 1693424429.9336572, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/lstm/token_classification.py": 1693424429.960659, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/lstm/backbone.py": 1693424429.960659, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/fid_plug/text_generation.py": 1693424429.9386575, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/fid_plug/configuration.py": 1693424429.9386575, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/fid_plug/backbone.py": 1693424429.9386575, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/text_classification.py": 1693424430.0086622, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/configuration.py": 1693424430.007662, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/token_classification.py": 1693424430.0086622, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/backbone.py": 1693424430.007662, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/veco/fill_mask.py": 1693424430.007662, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/text_classification.py": 1693424429.9836605, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/configuration.py": 1693424429.9836605, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/adv_utils.py": 1693424429.9826605, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug_mental/backbone.py": 1693424429.9836605, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/glm_130b/text_generation.py": 1693424429.9406576, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/glm_130b/initialize.py": 1693424429.9396577, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/glm_130b/quantization/functional.py": 1693424429.9416578, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/glm_130b/quantization/layers.py": 1693424429.9426577, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/glm_130b/generation/strategies.py": 1693424429.9406576, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt_neo/backbone.py": 1693424429.9506583, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug/generator.py": 1693424429.9816604, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug/configuration.py": 1693424429.9816604, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug/distributed_plug.py": 1693424429.9816604, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug/backbone.py": 1693424429.9806602, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/plug/AnnealingLR.py": 1693424429.9796603, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama2/text_generation.py": 1693424429.958659, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama2/configuration.py": 1693424429.958659, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama2/tokenization.py": 1693424429.9596589, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama2/tokenization_fast.py": 1693424429.9596589, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama2/backbone.py": 1693424429.9576588, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/text_generation_head.py": 1693424429.9526584, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/infromation_extraction_head.py": 1693424429.9526584, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/token_classification_head.py": 1693424429.9536586, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/text_classification_head.py": 1693424429.9526584, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/text_ranking_head.py": 1693424429.9536586, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/torch_pretrain_head.py": 1693424429.9536586, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/crf_head.py": 1693424429.9516585, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/heads/fill_mask_head.py": 1693424429.9516585, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/csanmt/translation.py": 1693424429.9326572, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/gpt2/backbone.py": 1693424429.9426577, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/palm_v2/text_generation.py": 1693424429.9776602, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/palm_v2/configuration.py": 1693424429.97666, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/palm_v2/dureader_eval.py": 1693424429.97666, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/bloom/backbone.py": 1693424429.9226565, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/fid_T5/text_generation.py": 1693424429.9376574, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm2/text_generation.py": 1693424429.9286568, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm2/configuration.py": 1693424429.927657, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm2/quantization.py": 1693424429.927657, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/chatglm2/tokenization.py": 1693424429.9286568, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/polylm/text_generation.py": 1693424429.9846606, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/codegeex/inference.py": 1693424429.930657, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/codegeex/codegeex_for_code_translation.py": 1693424429.930657, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/codegeex/codegeex.py": 1693424429.929657, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/codegeex/codegeex_for_code_generation.py": 1693424429.930657, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/codegeex/tokenizer.py": 1693424429.931657, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/T5/configuration.py": 1693424429.916656, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/T5/text2text_generation.py": 1693424429.9176562, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/T5/backbone.py": 1693424429.916656, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/megatron_bert/configuration.py": 1693424429.961659, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/megatron_bert/backbone.py": 1693424429.961659, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/megatron_bert/fill_mask.py": 1693424429.9626591, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/qwen/text_generation.py": 1693424429.9886608, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/qwen/qwen_generation_utils.py": 1693424429.9886608, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/qwen/configuration.py": 1693424429.9876606, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/qwen/tokenization.py": 1693424429.989661, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/qwen/backbone.py": 1693424429.9876606, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama/text_generation.py": 1693424429.9566586, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama/configuration.py": 1693424429.9556587, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama/convert_llama_weights_to_hf.py": 1693424429.9566586, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama/tokenization.py": 1693424429.9566586, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama/tokenization_fast.py": 1693424429.9576588, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/llama/backbone.py": 1693424429.9556587, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/use/user_satisfaction_estimation.py": 1693424430.006662, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/use/transformer.py": 1693424430.006662, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/unite/configuration.py": 1693424430.005662, "/usr/local/lib/python3.10/dist-packages/modelscope/models/nlp/unite/translation_evaluation.py": 1693424430.005662, "/usr/local/lib/python3.10/dist-packages/modelscope/models/base/base_head.py": 1693424429.4506254, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/base/base_model.py": 1693424429.4516256, "/usr/local/lib/python3.10/dist-packages/modelscope/models/base/base_torch_model.py": 1693424429.4516256, "/usr/local/lib/python3.10/dist-packages/modelscope/models/base/base_torch_head.py": 1693424429.4516256, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/dataset.py": 1693424430.0116622, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/data/utils.py": 1693424430.0156627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/data/process.py": 1693424430.0136626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/data/protein.py": 1693424430.0146625, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/data/residue_constants.py": 1693424430.0156627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/data/process_multimer.py": 1693424430.0146625, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/data/data_ops.py": 1693424430.0126624, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/data/msa_pairing.py": 1693424430.0136626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/config.py": 1693424430.0116622, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/model.py": 1693424430.0116622, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/utils.py": 1693424430.023663, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/tools/utils.py": 1693424430.0266633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/tools/hhsearch.py": 1693424430.0246632, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/tools/kalign.py": 1693424430.0256631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/tools/hmmbuild.py": 1693424430.0246632, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/tools/hhblits.py": 1693424430.0246632, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/tools/jackhmmer.py": 1693424430.0256631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/tools/hmmsearch.py": 1693424430.0256631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/mmcif.py": 1693424430.021663, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/pipeline.py": 1693424430.022663, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/templates.py": 1693424430.022663, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/parsers.py": 1693424430.021663, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/msa/msa_identifiers.py": 1693424430.021663, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/frame.py": 1693424430.0196629, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/attentions.py": 1693424430.0166626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/structure_module.py": 1693424430.0196629, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/embedders.py": 1693424430.0186627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/auxillary_heads.py": 1693424430.0176628, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/confidence.py": 1693424430.0176628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/triangle_multiplication.py": 1693424430.020663, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/template.py": 1693424430.020663, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/featurization.py": 1693424430.0186627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/common.py": 1693424430.0176628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/alphafold.py": 1693424430.0166626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/science/unifold/modules/evoformer.py": 1693424430.0186627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/builder.py": 1693424429.4266238, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_try_on/generator.py": 1693424429.6406379, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_try_on/landmark.py": 1693424429.6406379, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_try_on/warping.py": 1693424429.641638, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_try_on/try_on_infer.py": 1693424429.6406379, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_middleware/vim.py": 1693424429.840651, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_middleware/model.py": 1693424429.840651, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_middleware/head.py": 1693424429.840651, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_middleware/backbone.py": 1693424429.839651, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cmdssl_video_embedding/c3d.py": 1693424429.4716268, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cmdssl_video_embedding/resnet3d.py": 1693424429.4716268, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cmdssl_video_embedding/resnet2p1d.py": 1693424429.4716268, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/facial_expression_recognition/fer/vgg.py": 1693424429.5296307, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/facial_expression_recognition/fer/transforms.py": 1693424429.5286305, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/facial_expression_recognition/fer/facial_expression_recognition.py": 1693424429.5286305, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/geometry/camera_utils.py": 1693424429.7686462, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/geometry/pose_utils.py": 1693424429.7686462, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/geometry/camera.py": 1693424429.7676463, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/geometry/pose.py": 1693424429.7686462, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/networks/depth_pose/depth_pose_net.py": 1693424429.7726467, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/networks/layers/resnet/layers.py": 1693424429.7736466, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/networks/layers/resnet/resnet_encoder.py": 1693424429.7746468, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/networks/layers/resnet/pose_decoder.py": 1693424429.7736466, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/networks/layers/resnet/depth_decoder.py": 1693424429.7736466, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/networks/optim/extractor.py": 1693424429.7746468, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/networks/optim/update.py": 1693424429.775647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/dro_model.py": 1693424429.7666461, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/utils/image.py": 1693424429.777647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/utils/config.py": 1693424429.7766469, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/utils/augmentations.py": 1693424429.775647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/utils/horovod.py": 1693424429.7766469, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/utils/load.py": 1693424429.778647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/utils/depth.py": 1693424429.7766469, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/utils/types.py": 1693424429.778647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/utils/image_gt.py": 1693424429.777647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/utils/misc.py": 1693424429.778647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/configs/default_config.py": 1693424429.7676463, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/models/sfm_model_mf.py": 1693424429.7706466, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/models/model_wrapper.py": 1693424429.7706466, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/models/model_checkpoint.py": 1693424429.7696464, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/models/sup_model_mf.py": 1693424429.7716465, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_depth_estimation/models/model_utils.py": 1693424429.7696464, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_acc/network/utils.py": 1693424429.6636395, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_acc/network/nerf.py": 1693424429.6626394, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_acc/network/segmenter.py": 1693424429.6626394, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_acc/nerf_preprocess.py": 1693424429.6606393, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_acc/nerf_recon_acc.py": 1693424429.6606393, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_acc/dataloader/nerf_dataset.py": 1693424429.6616392, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_acc/dataloader/read_write_model.py": 1693424429.6616392, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mogface/models/mogprednet.py": 1693424429.4846277, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mogface/models/utils.py": 1693424429.4856277, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mogface/models/mogface.py": 1693424429.4846277, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mogface/models/resnet.py": 1693424429.4846277, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mogface/models/detectors.py": 1693424429.4836276, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mtcnn/models/first_stage.py": 1693424429.4866278, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mtcnn/models/box_utils.py": 1693424429.4866278, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mtcnn/models/detector.py": 1693424429.4866278, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/mtcnn/models/get_nets.py": 1693424429.487628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/damofd_detect.py": 1693424429.4916282, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/core/bbox/transforms.py": 1693424429.4946284, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/core/post_processing/bbox_nms.py": 1693424429.4946284, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/transforms.py": 1693424429.4976285, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/formating.py": 1693424429.4966285, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/auto_augment.py": 1693424429.4966285, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/pipelines/loading.py": 1693424429.4976285, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/datasets/retinaface.py": 1693424429.4956284, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/detectors/base.py": 1693424429.5016289, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/detectors/tinymog.py": 1693424429.5026288, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/detectors/scrfd.py": 1693424429.5016289, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/detectors/single_stage.py": 1693424429.5026288, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/backbones/master_net.py": 1693424429.4986286, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/backbones/resnet.py": 1693424429.4996285, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/backbones/mobilenet.py": 1693424429.4996285, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/mmdet_patch/models/dense_heads/scrfd_head.py": 1693424429.5006287, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/preprocessor.py": 1693424429.492628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/tinymog_detect.py": 
1693424429.492628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/scrfd/scrfd_detect.py": 1693424429.492628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/retinaface/utils.py": 1693424429.490628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/retinaface/detection.py": 1693424429.489628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/retinaface/models/net.py": 1693424429.490628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/retinaface/models/retinaface.py": 1693424429.4916282, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/detection.py": 1693424429.503629, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/vision/box_utils.py": 1693424429.503629, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/vision/ssd/ssd.py": 1693424429.5066292, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/vision/ssd/mb_tiny_fd.py": 1693424429.505629, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/vision/ssd/data_preprocessing.py": 1693424429.505629, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/vision/ssd/predictor.py": 1693424429.5066292, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/vision/ssd/fd_config.py": 1693424429.505629, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/vision/transforms.py": 1693424429.504629, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/ulfd_slim/vision/mb_tiny.py": 1693424429.504629, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/peppa_pig_face/face_landmark.py": 1693424429.488628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/peppa_pig_face/face_detector.py": 1693424429.487628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/peppa_pig_face/facer.py": 1693424429.488628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_detection/peppa_pig_face/LK/lk.py": 1693424429.489628, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/hand_static/hand_model.py": 1693424429.5316308, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/hand_static/networks.py": 1693424429.5316308, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/Reconstruction.py": 1693424429.532631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/utils.py": 1693424429.532631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/models/Surface_head.py": 1693424429.534631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/models/geometry.py": 1693424429.535631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/models/Embedding.py": 1693424429.5336308, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/models/human_segmenter.py": 1693424429.535631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/models/Res_backbone.py": 1693424429.5336308, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/models/networks.py": 1693424429.535631, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/models/PixToMesh.py": 1693424429.5336308, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/human_reconstruction/models/detectors.py": 1693424429.534631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_skychange/skychange.py": 1693424429.6276371, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_skychange/preprocessor.py": 1693424429.626637, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_skychange/ptsemseg/hrnet_super_and_ocr.py": 1693424429.628637, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_skychange/ptsemseg/BlockModules.py": 1693424429.6276371, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_skychange/ptsemseg/unet.py": 1693424429.6296372, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_skychange/ptsemseg/hrnet_backnone.py": 1693424429.628637, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_skychange/skychange_model.py": 1693424429.6276371, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_color_enhance/csrnet.py": 1693424429.5436316, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_color_enhance/image_color_enhance.py": 1693424429.5436316, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_color_enhance/deeplpf/deeplpf_image_color_enhance.py": 1693424429.5456316, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_color_enhance/deeplpf/deeplpfnet.py": 1693424429.5456316, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_color_enhance/adaint/adaint.py": 1693424429.5446317, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/interp_model/refinenet_arch.py": 1693424429.7836473, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/interp_model/transformer_layers.py": 1693424429.7836473, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/interp_model/UNet.py": 1693424429.7826474, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/interp_model/flow_reversal.py": 1693424429.7826474, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/interp_model/IFNet_swin.py": 1693424429.7816472, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/flow_model/raft.py": 1693424429.780647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/flow_model/extractor.py": 1693424429.780647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/flow_model/corr.py": 1693424429.780647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/flow_model/update.py": 1693424429.7816472, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/VFINet_arch.py": 1693424429.778647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/VFINet_for_video_frame_interpolation.py": 1693424429.779647, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/utils/utils.py": 1693424429.7846475, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_frame_interpolation/utils/scene_change_detection.py": 1693424429.7846475, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/image_portrait_enhancement.py": 1693424429.6026354, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/eqface/model_resnet.py": 1693424429.6036355, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/eqface/fqa.py": 1693424429.6036355, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/gpen.py": 1693424429.6026354, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/align_faces.py": 1693424429.6016355, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/retinaface/utils.py": 1693424429.6056356, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/retinaface/detection.py": 1693424429.6056356, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/retinaface/models/net.py": 1693424429.6066358, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/retinaface/models/retinaface.py": 1693424429.6076357, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/losses/losses.py": 1693424429.6046355, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/losses/model_irse.py": 1693424429.6056356, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_portrait_enhancement/losses/helpers.py": 1693424429.6046355, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/tracker/matching.py": 1693424429.7976482, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/tracker/multitracker.py": 1693424429.7976482, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/tracker/basetrack.py": 1693424429.7966483, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/utils/image.py": 1693424429.7986484, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/utils/utils.py": 1693424429.7996483, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/utils/kalman_filter.py": 1693424429.7986484, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/utils/visualization.py": 1693424429.7996483, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/models/yolo.py": 1693424429.7966483, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/models/model.py": 1693424429.795648, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/models/decode.py": 1693424429.795648, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_multi_object_tracking/models/common.py": 1693424429.795648, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/ddpm_seg/utils.py": 1693424429.6186364, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/ddpm_seg/pixel_classifier.py": 1693424429.6186364, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/ddpm_seg/data_util.py": 1693424429.6176364, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/ddpm_seg/feature_extractors.py": 
1693424429.6176364, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/ddpm_segmentation_model.py": 1693424429.6166363, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/semantic_seg_model.py": 1693424429.6166363, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/pan_merge/base_panoptic_fusion_head.py": 1693424429.6196365, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/pan_merge/maskformer_semantic_head.py": 1693424429.6196365, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/utils/data_process_func.py": 1693424429.625637, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/utils/seg_func.py": 1693424429.625637, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/utils/builder.py": 1693424429.625637, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/decode_heads/base_decode_head.py": 1693424429.6226368, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/decode_heads/mask2former_head_from_mmseg.py": 1693424429.6236367, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/segmentors/base_segmentor.py": 1693424429.624637, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/segmentors/encoder_decoder_mask2former.py": 1693424429.624637, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/backbone/base/beit.py": 1693424429.6226368, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/backbone/beit_adapter.py": 1693424429.6216366, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_semantic_segmentation/vit_adapter/models/backbone/adapter_modules.py": 1693424429.6216366, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_summarization/summarizer.py": 1693424429.8296504, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_summarization/kts/cpd_nonlin.py": 1693424429.8306503, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_summarization/kts/cpd_auto.py": 1693424429.8306503, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_summarization/pgl_sum.py": 1693424429.8296504, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_summarization/base_model.py": 1693424429.8286502, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_human_hand_detection/nanodet_plus_head.py": 1693424429.5136297, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_human_hand_detection/utils.py": 1693424429.5146296, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_human_hand_detection/det_infer.py": 1693424429.5126295, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_human_hand_detection/ghost_pan.py": 1693424429.5126295, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_human_hand_detection/shufflenetv2.py": 1693424429.5136297, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_human_hand_detection/one_stage_detector.py": 1693424429.5136297, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_body_reshaping/image_body_reshaping.py": 1693424429.5376313, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_body_reshaping/model.py": 1693424429.5386312, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_body_reshaping/pose_estimator/model.py": 1693424429.5396314, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_body_reshaping/pose_estimator/body.py": 1693424429.5396314, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_body_reshaping/pose_estimator/util.py": 1693424429.5406313, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_body_reshaping/slim_utils.py": 1693424429.5386312, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_body_reshaping/person_info.py": 1693424429.5386312, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/defrcn_for_fewshot.py": 1693424429.5536323, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/utils/model_surgery_op.py": 1693424429.5576324, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/utils/voc_register.py": 1693424429.5586326, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/utils/register_data.py": 1693424429.5576324, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/utils/configuration_mapper.py": 1693424429.5576324, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/utils/coco_register.py": 1693424429.5576324, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/utils/requirements_check.py": 1693424429.5586326, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/models/defrcn.py": 1693424429.5556324, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/models/gdl.py": 1693424429.5566325, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/models/roi_heads.py": 1693424429.5566325, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/models/resnet.py": 1693424429.5566325, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/models/calibration_layer.py": 1693424429.5556324, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/models/fast_rcnn.py": 1693424429.5556324, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/evaluation/pascal_voc_evaluation.py": 1693424429.5546322, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/evaluation/coco_evaluation.py": 1693424429.5536323, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_defrcn_fewshot/evaluation/evaluator.py": 1693424429.5546322, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/quadtree_attention_model.py": 1693424429.5906346, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/utils/misc.py": 1693424429.596635, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/loftr_quadtree/backbone/resnet_fpn.py": 1693424429.5936348, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/loftr_quadtree/loftr.py": 1693424429.5926347, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/loftr_quadtree/loftr_module/quadtree_attention.py": 
1693424429.594635, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/loftr_quadtree/loftr_module/fine_preprocess.py": 1693424429.5936348, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/loftr_quadtree/loftr_module/linear_attention.py": 1693424429.594635, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/loftr_quadtree/loftr_module/transformer.py": 1693424429.594635, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/loftr_quadtree/utils/fine_matching.py": 1693424429.596635, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/loftr_quadtree/utils/position_encoding.py": 1693424429.596635, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/loftr_quadtree/utils/coarse_matching.py": 1693424429.595635, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_matching/config/default.py": 1693424429.5916348, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/open_vocabulary_detection_vild/vild.py": 1693424429.6986418, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/shop_segmentation/neck_fpn.py": 1693424429.7206433, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/shop_segmentation/utils.py": 1693424429.7216432, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/shop_segmentation/shop_seg_base.py": 1693424429.7206433, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/shop_segmentation/head_fpn.py": 1693424429.719643, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/shop_segmentation/models.py": 1693424429.7206433, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/shop_segmentation/shop_seg_model.py": 1693424429.7216432, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/shop_segmentation/common.py": 1693424429.719643, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/utils.py": 1693424429.7226434, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/detection_model/detection_unet_in.py": 1693424429.7236433, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/detection_model/detection_module.py": 1693424429.7236433, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/weights_init.py": 1693424429.7226434, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/unet_deploy.py": 1693424429.7226434, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/retinaface/net.py": 1693424429.7256436, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/retinaface/network.py": 1693424429.7266436, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/retinaface/prior_box.py": 1693424429.7266436, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/retinaface/utils.py": 1693424429.7276437, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/retinaface/box_utils.py": 1693424429.7256436, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/retinaface/predict_single.py": 1693424429.7266436, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/inpainting_model/gconv.py": 1693424429.7246435, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/skin_retouching/inpainting_model/inpainting_unet.py": 1693424429.7246435, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/utils.py": 1693424429.747645, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/detector.py": 1693424429.746645, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/apis/detector_evaluater.py": 1693424429.748645, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/apis/detector_inference.py": 1693424429.748645, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/detectors/detector.py": 1693424429.7606459, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/augmentations/box_level_augs/geometric_augs.py": 1693424429.7506452, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/augmentations/box_level_augs/box_level_augs.py": 1693424429.749645, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/augmentations/box_level_augs/color_augs.py": 1693424429.7506452, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/augmentations/box_level_augs/gaussian_maps.py": 1693424429.7506452, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/augmentations/scale_aware_aug.py": 1693424429.749645, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/core/neck_ops.py": 1693424429.7536454, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/core/utils.py": 1693424429.7556455, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/core/weight_init.py": 1693424429.7556455, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/core/base_ops.py": 1693424429.7536454, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/core/repvgg_block.py": 1693424429.7546453, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/core/ota_assigner.py": 1693424429.7546453, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/core/ops.py": 1693424429.7546453, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/backbones/tinynas_csp.py": 1693424429.7526453, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/backbones/tinynas_res.py": 1693424429.7526453, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/backbones/darknet.py": 1693424429.7516453, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/necks/giraffe_fpn_btn.py": 1693424429.7596457, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/necks/giraffe_config.py": 1693424429.7586458, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/necks/giraffe_fpn.py": 1693424429.7596457, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/heads/zero_head.py": 1693424429.7566454, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/heads/gfocal_v2_tiny.py": 1693424429.7566454, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/losses/gfocal_loss.py": 1693424429.7586458, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/base_models/losses/distill_loss.py": 1693424429.7576456, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/utils/boxes.py": 1693424429.762646, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/utils/scheduler.py": 1693424429.7636461, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/utils/model_utils.py": 1693424429.762646, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/structures/image_list.py": 1693424429.7616458, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/structures/boxlist_ops.py": 1693424429.7616458, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/damo/structures/bounding_box.py": 1693424429.7606459, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/tinynas_detector.py": 1693424429.746645, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_detection/tinynas_damoyolo.py": 1693424429.746645, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_panoptic_segmentation/panseg_model.py": 1693424429.6016355, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/utils.py": 1693424429.688641, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/preprocessor.py": 1693424429.688641, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/model.py": 1693424429.688641, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/modules/seg_detector_loss.py": 1693424429.6916413, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/modules/mix_ops.py": 1693424429.6906412, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/modules/dbnet.py": 1693424429.6896412, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/modules/layers.py": 1693424429.6906412, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_detection/modules/proxyless.py": 1693424429.6906412, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_2d_keypoints/hrnet_basic_modules.py": 1693424429.461626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_2d_keypoints/hrnet_v2.py": 1693424429.461626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_2d_keypoints/w48.py": 1693424429.4626262, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/tracker/procontext.py": 1693424429.8166494, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/tracker/ostrack.py": 1693424429.8166494, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/utils/utils.py": 1693424429.8176496, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/config/ostrack.py": 1693424429.8106492, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/layers/attn_blocks.py": 1693424429.811649, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/layers/attn.py": 1693424429.811649, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/layers/patch_embed.py": 1693424429.8126493, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/layers/head.py": 1693424429.8126493, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/ostrack/utils.py": 1693424429.8136494, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/ostrack/base_backbone.py": 1693424429.8136494, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/ostrack/ostrack.py": 1693424429.8136494, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/ostrack/vit_ce.py": 1693424429.8146493, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/procontext/procontext.py": 1693424429.8146493, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/procontext/utils.py": 1693424429.8156495, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_single_object_tracking/models/procontext/vit_ce.py": 1693424429.8156495, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/networks/utils.py": 1693424429.6436381, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/networks/backbone/resnet_DA.py": 1693424429.6436381, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/networks/backbone/vit_horizon_pry_image.py": 1693424429.6446383, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/networks/modality/layout.py": 1693424429.6466384, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/networks/panovit.py": 1693424429.642638, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/networks/misc/post_proc.py": 1693424429.6456382, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/networks/misc/fourier.py": 1693424429.6446383, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/networks/misc/panostretch.py": 1693424429.6456382, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/indoor_layout_estimation/panovit.py": 1693424429.642638, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_classification/utils.py": 1693424429.5416315, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_classification/backbones/nextvit.py": 1693424429.5426314, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_classification/backbones/beit_v2.py": 1693424429.5426314, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_classification/resnet50_cc.py": 1693424429.5416315, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_classification/mmcls_model.py": 1693424429.5406313, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/inference_memory_bank.py": 1693424429.8016486, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/modules.py": 1693424429.8026485, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/eval_network.py": 1693424429.8006485, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/network.py": 1693424429.8026485, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/inference_core.py": 1693424429.8006485, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/aggregate.py": 1693424429.7996483, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/model.py": 1693424429.8016486, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/cbam.py": 1693424429.8006485, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_object_segmentation/mod_resnet.py": 1693424429.8016486, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_inpainting/inpainting.py": 1693424429.7876475, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_inpainting/inpainting_model.py": 1693424429.7886477, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation_bts/networks/utils.py": 1693424429.565633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation_bts/networks/encoder.py": 1693424429.565633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation_bts/networks/decoder.py": 1693424429.565633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation_bts/networks/bts_model.py": 1693424429.564633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation_bts/depth_estimation_bts_model.py": 1693424429.564633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/bad_image_detecting/bad_image_detecting.py": 1693424429.4606261, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_attribute_recognition/fair_face/face_attribute_recognition.py": 1693424429.4826276, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/utils.py": 1693424429.5206301, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/pix2pix/pix2pix_model.py": 1693424429.5276306, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/pix2pix/pix2pix_options.py": 1693424429.5276306, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/pix2pix/networks.py": 1693424429.5266304, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/renderer.py": 1693424429.5246303, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/facelandmark/large_base_lmks_infer.py": 1693424429.5256305, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/facelandmark/nets/large_eyeball_net.py": 1693424429.5266304, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/facelandmark/nets/large_base_lmks_net.py": 1693424429.5256305, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/de_retouching_module.py": 1693424429.52163, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/opt.py": 1693424429.5236301, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/losses.py": 1693424429.5226302, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/nv_diffrast.py": 1693424429.5236301, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/networks.py": 1693424429.5226302, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/unet.py": 1693424429.5246303, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/bfm.py": 1693424429.52163, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_reconstruction/models/facerecon_model.py": 1693424429.5226302, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/unet/utils.py": 1693424429.5516322, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/unet/unet.py": 1693424429.550632, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/ddcolor/loss.py": 1693424429.5476317, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/ddcolor/ddcolor_for_image_colorization.py": 1693424429.5476317, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/ddcolor/utils/vgg.py": 1693424429.550632, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/ddcolor/utils/position_encoding.py": 1693424429.548632, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/ddcolor/utils/transformer_utils.py": 1693424429.5496318, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/ddcolor/utils/unet.py": 1693424429.5496318, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/ddcolor/utils/convnext.py": 1693424429.548632, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_colorization/ddcolor/ddcolor.py": 1693424429.5466318, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/panorama_depth_estimation/networks/unifuse.py": 1693424429.7006419, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/panorama_depth_estimation/networks/equi.py": 1693424429.6996417, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/panorama_depth_estimation/networks/layers.py": 1693424429.6996417, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/panorama_depth_estimation/networks/resnet.py": 1693424429.7006419, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/panorama_depth_estimation/networks/mobilenet.py": 1693424429.7006419, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/panorama_depth_estimation/networks/util.py": 1693424429.701642, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/panorama_depth_estimation/unifuse_model.py": 1693424429.6986418, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_mvs_depth_estimation/utils.py": 1693424429.5996351, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_mvs_depth_estimation/cas_mvsnet.py": 1693424429.597635, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_mvs_depth_estimation/depth_filter.py": 1693424429.5986352, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_mvs_depth_estimation/module.py": 1693424429.5996351, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_mvs_depth_estimation/casmvs_model.py": 1693424429.5986352, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_mvs_depth_estimation/colmap2mvsnet.py": 1693424429.5986352, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_mvs_depth_estimation/general_eval_dataset.py": 1693424429.5996351, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/base.py": 1693424429.5776339, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/refinement.py": 1693424429.5786338, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/model.py": 1693424429.5786338, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/modules/perceptual.py": 1693424429.5806339, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/modules/ffc.py": 1693424429.5806339, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/modules/inception.py": 1693424429.5806339, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/modules/ade20k/base.py": 1693424429.581634, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/modules/ade20k/resnet.py": 1693424429.5826342, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/modules/feature_matching.py": 1693424429.579634, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/modules/pix2pixhd.py": 1693424429.581634, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/modules/adversarial.py": 1693424429.579634, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_inpainting/default.py": 1693424429.5776339, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_super_resolution/basicvsr_net.py": 1693424429.8316505, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_super_resolution/real_basicvsr_net.py": 1693424429.8326507, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_super_resolution/real_basicvsr_for_video_super_resolution.py": 1693424429.8326507, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_super_resolution/msrresnet_lite_model.py": 1693424429.8326507, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_super_resolution/common.py": 1693424429.8316505, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/mlsd/utils.py": 1693424429.4786272, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/mlsd/mbv2_mlsd_large.py": 1693424429.4776273, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/annotator.py": 1693424429.4736269, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/midas/utils.py": 1693424429.474627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/midas/api.py": 1693424429.474627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/midas/midas/midas_net.py": 1693424429.476627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/midas/midas/dpt_depth.py": 1693424429.4756272, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/midas/midas/blocks.py": 1693424429.4756272, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/midas/midas/transforms.py": 1693424429.476627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/midas/midas/midas_net_custom.py": 1693424429.476627, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/midas/midas/base_model.py": 1693424429.4756272, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/midas/midas/vit.py": 1693424429.4776273, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/openpose/hand.py": 1693424429.4796274, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/openpose/model.py": 1693424429.4796274, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/openpose/body.py": 1693424429.4796274, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/annotator/openpose/util.py": 1693424429.4806273, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/controllable_image_generation/controlnet.py": 1693424429.472627, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_to_360panorama_image/pipeline_sr.py": 1693424429.7416446, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_to_360panorama_image/pipeline_base.py": 1693424429.7416446, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/data/transforms.py": 1693424429.6346376, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/ops/svd.py": 1693424429.639638, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/ops/utils.py": 1693424429.639638, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/ops/apps.py": 1693424429.6366377, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/ops/losses.py": 1693424429.6376376, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/ops/degradation.py": 1693424429.6366377, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/ops/random_mask.py": 1693424429.6386378, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/ops/random_color.py": 1693424429.6386378, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/ops/diffusion.py": 1693424429.6376376, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/ops/metrics.py": 1693424429.6386378, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/model_translation.py": 1693424429.6336374, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/models/clip.py": 1693424429.6356375, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_translation/models/autoencoder.py": 1693424429.6356375, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_human_parsing/backbone/deeplab_resnet.py": 1693424429.5756338, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_human_parsing/m2fp/m2fp_encoder.py": 1693424429.5766337, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_human_parsing/m2fp/m2fp_decoder.py": 1693424429.5766337, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_human_parsing/m2fp_net.py": 1693424429.5746336, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_human_parsing/parsing_utils.py": 1693424429.5756338, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/s2net_panorama_depth_estimation/networks/util_helper.py": 1693424429.715643, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/s2net_panorama_depth_estimation/networks/config.py": 1693424429.7136428, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/s2net_panorama_depth_estimation/networks/model.py": 1693424429.7146428, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/s2net_panorama_depth_estimation/networks/swin_transformer.py": 1693424429.7146428, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/s2net_panorama_depth_estimation/networks/resnet.py": 1693424429.7146428, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/s2net_panorama_depth_estimation/networks/decoder.py": 1693424429.7136428, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/s2net_panorama_depth_estimation/s2net_model.py": 1693424429.7126427, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_mos/censeo_ivqa_model.py": 1693424429.611636, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_mos/backbones/resnet.py": 1693424429.612636, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_mos/image_quality_assessment_mos.py": 1693424429.611636, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_mos/heads/simple_head.py": 1693424429.6136363, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_probing_model/utils.py": 1693424429.608636, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_probing_model/model.py": 1693424429.608636, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_probing_model/backbone.py": 1693424429.6076357, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/backbones/vit.py": 1693424429.67064, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/necks/fpn.py": 1693424429.67264, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/roi_heads/mask_heads/fcn_mask_head.py": 1693424429.6746402, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/roi_heads/bbox_heads/convfc_bbox_head.py": 1693424429.67364, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/dense_heads/anchor_head.py": 1693424429.67164, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/dense_heads/rpn_head.py": 1693424429.67164, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/utils/checkpoint.py": 1693424429.6756403, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_ms/utils/convModule_norm.py": 1693424429.6756403, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection/mmdet_model.py": 1693424429.6696398, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_classfication/master_net.py": 1693424429.7436447, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_classfication/plain_net_utils.py": 1693424429.7446449, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_classfication/global_utils.py": 1693424429.7436447, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_classfication/super_res_kxkx.py": 1693424429.7456448, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_classfication/super_blocks.py": 1693424429.7446449, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_classfication/super_res_k1kxk1.py": 1693424429.7456448, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_classfication/model_zoo.py": 1693424429.7436447, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_classfication/basic_blocks.py": 1693424429.7426445, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/tinynas_classfication/super_res_idwexkx.py": 1693424429.7446449, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_paintbyexample/model.py": 1693424429.6006353, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_efficient_tuning/model.py": 1693424429.8376508, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_efficient_tuning/timm_weight_init.py": 1693424429.838651, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_efficient_tuning/petl.py": 1693424429.8376508, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_efficient_tuning/vision_efficient_tuning.py": 1693424429.839651, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_efficient_tuning/timm_vision_transformer.py": 1693424429.838651, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_efficient_tuning/head.py": 1693424429.8376508, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_efficient_tuning/backbone.py": 1693424429.8366508, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vision_efficient_tuning/timm_helpers.py": 1693424429.838651, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/movie_scene_segmentation/model.py": 1693424429.6526387, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/movie_scene_segmentation/get_model.py": 1693424429.6526387, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/movie_scene_segmentation/utils/shot_encoder.py": 1693424429.6546388, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/movie_scene_segmentation/utils/trn.py": 1693424429.6546388, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/movie_scene_segmentation/utils/head.py": 1693424429.6536388, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/movie_scene_segmentation/utils/save_op.py": 1693424429.6536388, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/robust_image_classification/easyrobust_model.py": 1693424429.7116425, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_streaming_perception/longshortnet/exp/longshortnet_base.py": 1693424429.8266501, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_streaming_perception/longshortnet/longshortnet.py": 1693424429.8256502, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_streaming_perception/longshortnet/models/longshort_backbone_neck.py": 1693424429.8286502, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_streaming_perception/longshortnet/models/longshort.py": 1693424429.8276503, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_streaming_perception/longshortnet/models/dfp_pafpn_short.py": 1693424429.8276503, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_streaming_perception/longshortnet/models/dfp_pafpn_long.py": 1693424429.8276503, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_degradation/degradation_model.py": 1693424429.6096358, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_degradation/image_quality_assessment_degradation.py": 1693424429.6096358, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/exp/build.py": 1693424429.7296438, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/exp/default/streamyolo.py": 1693424429.7306437, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/exp/base_exp.py": 1693424429.7296438, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/exp/yolox_base.py": 1693424429.7296438, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/data/data_augment.py": 1693424429.7286437, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/realtime_video_detector.py": 1693424429.7276437, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/utils/boxes.py": 1693424429.733644, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/utils/format.py": 1693424429.733644, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/models/tal_head.py": 1693424429.732644, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/models/streamyolo.py": 1693424429.732644, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/models/network_blocks.py": 1693424429.731644, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/models/darknet.py": 1693424429.731644, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/stream_yolo/models/dfp_pafpn.py": 1693424429.731644, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/renderer.py": 1693424429.6646395, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/network/weighted_vq.py": 1693424429.6686397, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF_VQ.py": 1693424429.6676397, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/network/tensorBase.py": 1693424429.6686397, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF.py": 1693424429.6676397, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/utils.py": 1693424429.6646395, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/nerf_recon_vq_compression.py": 1693424429.6636395, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/dataloader/ray_utils.py": 1693424429.6666396, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/dataloader/tankstemple.py": 1693424429.6666396, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/dataloader/llff.py": 1693424429.6656396, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/dataloader/blender.py": 1693424429.6656396, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_vq_compression/dataloader/nsvf.py": 1693424429.6666396, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/cannonical_pose/canonical_pose_modules.py": 1693424429.4636264, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/cannonical_pose/body_3d_pose.py": 1693424429.4636264, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/hdformer/block.py": 1693424429.4646263, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/hdformer/hdformer.py": 1693424429.4656265, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/hdformer/hdformer_detector.py": 1693424429.4656265, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/hdformer/directed_graph.py": 1693424429.4646263, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/hdformer/backbone.py": 1693424429.4646263, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/body_3d_keypoints/hdformer/skeleton.py": 1693424429.4656265, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/virual_tryon/sdafnet.py": 1693424429.8366508, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/product_segmentation/seg_infer.py": 1693424429.7066422, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/product_segmentation/net.py": 1693424429.7056422, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/network/utils.py": 1693424429.6596391, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/network/dvgo.py": 1693424429.6596391, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/nerf_recon_4k.py": 1693424429.655639, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/nerf_preprocess.py": 1693424429.655639, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/dataloader/load_data.py": 1693424429.656639, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/dataloader/load_llff.py": 1693424429.657639, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/dataloader/load_blender.py": 1693424429.656639, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/dataloader/load_tankstemple.py": 1693424429.657639, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/nerf_recon_4k/dataloader/read_write_model.py": 1693424429.657639, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/abnormal_object_detection/mmdet_ms/roi_head/roi_extractors/single_level_roi_extractor.py": 1693424429.4546256, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/abnormal_object_detection/mmdet_ms/roi_head/mask_scoring_roi_head.py": 1693424429.4536257, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/abnormal_object_detection/mmdet_model.py": 1693424429.4526255, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/image_face_fusion.py": 1693424429.5676332, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/network/aei_flow_net.py": 1693424429.5726335, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/network/model_irse.py": 1693424429.5736334, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/network/aad_layer.py": 1693424429.5716333, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/network/bfm.py": 1693424429.5726335, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/network/facerecon_model.py": 1693424429.5736334, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/network/dense_motion.py": 1693424429.5726335, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/network/ops.py": 1693424429.5746336, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/facegan/gpen_model.py": 1693424429.568633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/facegan/op/conv2d_gradfix.py": 1693424429.5696332, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/facegan/op/upfirdn2d.py": 1693424429.5706334, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/facegan/op/fused_act.py": 1693424429.5696332, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/facegan/face_gan.py": 1693424429.568633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/facelib/matlab_cp2tform.py": 1693424429.5716333, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_face_fusion/facelib/align_trans.py": 1693424429.5706334, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_generation/data/transforms.py": 1693424429.6306372, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_generation/ops/losses.py": 1693424429.6326375, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_generation/ops/diffusion.py": 1693424429.6326375, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_generation/model.py": 1693424429.6306372, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_generation/models/clip.py": 1693424429.6316373, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_to_image_generation/models/autoencoder.py": 1693424429.6316373, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vop_retrieval/tokenization_clip.py": 1693424429.8436513, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vop_retrieval/model_se.py": 1693424429.8426511, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vop_retrieval/model.py": 1693424429.8426511, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vop_retrieval/basic_utils.py": 1693424429.8416512, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vop_retrieval/backbone.py": 1693424429.8416512, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/motion_generation/model.py": 1693424429.6496384, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/motion_generation/modules/cfg_sampler.py": 1693424429.6506386, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/motion_generation/modules/smpl.py": 1693424429.6516387, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/motion_generation/modules/mdm.py": 1693424429.6506386, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/motion_generation/modules/rotation2xyz.py": 1693424429.6516387, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/motion_generation/modules/respace.py": 1693424429.6516387, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/motion_generation/modules/gaussian_diffusion.py": 1693424429.6506386, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_restoration/demoire_models/nets.py": 1693424429.6156363, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_restoration/image_restoration_model.py": 1693424429.6156363, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/pedestrian_attribute_recognition/model.py": 1693424429.702642, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/product_retrieval_embedding/item_embedding.py": 1693424429.704642, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/product_retrieval_embedding/item_detection.py": 1693424429.704642, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/product_retrieval_embedding/item_model.py": 1693424429.7056422, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/pointcloud_sceneflow_estimation/sf_rcp.py": 1693424429.7036421, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/pointcloud_sceneflow_estimation/rcp_model.py": 1693424429.7036421, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/pointcloud_sceneflow_estimation/common.py": 1693424429.702642, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/pointcloud_sceneflow_estimation/pointnet2_utils.py": 1693424429.7036421, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_human_matting/model.py": 1693424429.7856474, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_human_matting/models/lraspp.py": 1693424429.7866476, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_human_matting/models/deep_guided_filter.py": 1693424429.7866476, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_human_matting/models/matting.py": 1693424429.7876475, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_human_matting/models/effv2.py": 1693424429.7866476, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_human_matting/models/decoder.py": 1693424429.7856474, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_deblur/nafnet_for_image_deblur.py": 1693424429.552632, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/facial_landmark_confidence/flc/manual_landmark_net.py": 1693424429.5306306, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/facial_landmark_confidence/flc/facial_landmark_confidence.py": 1693424429.5306306, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/track/quasi_dense_embed_tracker.py": 1693424429.809649, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/backbone/swin_transformer.py": 1693424429.8056488, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/backbone/swin_checkpoint.py": 1693424429.8046486, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/neck/fpn.py": 1693424429.808649, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/visualizer.py": 1693424429.8036487, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/video_k_net.py": 1693424429.8036487, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/kernel_iter_head.py": 1693424429.8066487, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/semantic_fpn_wrapper.py": 1693424429.807649, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/kernel_updator.py": 1693424429.807649, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/kernel_head.py": 1693424429.8056488, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/kernel_update_head.py": 1693424429.8066487, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/mask.py": 1693424429.807649, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_panoptic_segmentation/head/track_heads.py": 1693424429.808649, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_denoise/nafnet_for_image_denoise.py": 1693424429.5586326, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_denoise/nafnet/NAFNet_arch.py": 1693424429.5596325, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_denoise/nafnet/arch_util.py": 1693424429.5606327, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/crowd_counting/hrnet_aspp_relu.py": 1693424429.4816275, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/crowd_counting/cc_model.py": 1693424429.4806273, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_man/maniqa.py": 1693424429.610636, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_man/swin.py": 1693424429.610636, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_quality_assessment_man/image_quality_assessment_man.py": 1693424429.610636, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_emotion/face_alignment/face_align.py": 1693424429.5096292, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_emotion/face_alignment/face.py": 1693424429.5096292, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_emotion/emotion_model.py": 1693424429.5076292, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_emotion/efficient/utils.py": 1693424429.5086293, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_emotion/efficient/model.py": 1693424429.5086293, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_emotion/emotion_infer.py": 1693424429.5076292, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUTRAFTStabilizer.py": 1693424429.8176496, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/RAFT/raft.py": 1693424429.8216498, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/RAFT/extractor.py": 1693424429.8216498, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/RAFT/corr.py": 1693424429.8216498, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/RAFT/update.py": 1693424429.82265, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/rf_det_module.py": 1693424429.8206499, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/DUT_raft.py": 1693424429.8186495, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/config.py": 1693424429.8196497, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/rf_det_so.py": 1693424429.8206499, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/Smoother.py": 1693424429.8196497, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/DUT/MotionPro.py": 1693424429.8186495, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/utils/MedianFilter.py": 1693424429.82265, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/utils/RAFTUtils.py": 1693424429.82365, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/utils/math_utils.py": 1693424429.82465, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/utils/WarpUtils.py": 1693424429.82365, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/utils/ProjectionUtils.py": 1693424429.82365, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/utils/image_utils.py": 1693424429.82465, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_stabilization/utils/IterativeSmooth.py": 1693424429.82265, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_driven_segmentation/clip.py": 1693424429.7376442, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_driven_segmentation/model.py": 1693424429.7406445, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_driven_segmentation/lseg_blocks.py": 1693424429.7386444, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_driven_segmentation/simple_tokenizer.py": 1693424429.7406445, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_driven_segmentation/lseg_base.py": 1693424429.7386444, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_driven_segmentation/lseg_model.py": 1693424429.7386444, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_driven_segmentation/lseg_vit.py": 1693424429.7396445, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/text_driven_segmentation/lseg_net.py": 1693424429.7396445, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/preprocessor.py": 1693424429.6926413, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/model.py": 1693424429.6926413, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/modules/ConvNextViT/vitstr.py": 1693424429.6956415, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/modules/ConvNextViT/timm_tinyc.py": 1693424429.6946416, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/modules/ConvNextViT/main_model.py": 1693424429.6946416, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/modules/ConvNextViT/convnext.py": 1693424429.6946416, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/modules/LightweightEdge/nas_block/mix_ops.py": 1693424429.6976416, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/modules/LightweightEdge/nas_block/layers.py": 1693424429.6966417, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/modules/LightweightEdge/nas_block/proxyless.py": 1693424429.6976416, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/modules/LightweightEdge/main_model.py": 1693424429.6956415, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/ocr_recognition/modules/CRNN/main_model.py": 1693424429.6936414, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_deinterlace/deinterlace_arch.py": 1693424429.7636461, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_deinterlace/UNet_for_video_deinterlace.py": 1693424429.7636461, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_deinterlace/models/utils.py": 1693424429.7656462, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_deinterlace/models/fre.py": 1693424429.7656462, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_deinterlace/models/deep_fourier_upsampling.py": 1693424429.764646, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_deinterlace/models/archs.py": 1693424429.764646, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_deinterlace/models/enh.py": 1693424429.7656462, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_generation/op/conv2d_gradfix.py": 1693424429.5106294, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_generation/op/upfirdn2d.py": 1693424429.5116293, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_generation/op/fused_act.py": 1693424429.5116293, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_generation/stylegan2.py": 1693424429.5106294, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/super_resolution/ecbsr_model.py": 1693424429.7346442, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/super_resolution/rrdbnet_arch.py": 1693424429.7346442, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/super_resolution/arch_util.py": 1693424429.733644, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/super_resolution/ecb.py": 1693424429.7346442, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/network.py": 1693424429.4676266, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/utils.py": 1693424429.4676266, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/loss.py": 1693424429.4666264, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/mtcnn_pytorch/src/matlab_cp2tform.py": 1693424429.4706268, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/mtcnn_pytorch/src/align_trans.py": 1693424429.4706268, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/facelib/face_landmark.py": 1693424429.4686265, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/facelib/face_detector.py": 1693424429.4686265, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/facelib/facer.py": 1693424429.4696267, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/facelib/config.py": 1693424429.4686265, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/facelib/LK/lk.py": 1693424429.4696267, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/cartoon/model_tf.py": 1693424429.4666264, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_binary_quant_classification/binary_quant_model.py": 1693424429.536631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_binary_quant_classification/bnext.py": 1693424429.536631, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/salient_detection/salient_model.py": 1693424429.7166429, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/salient_detection/models/modules.py": 1693424429.7166429, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/salient_detection/models/utils.py": 1693424429.717643, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/salient_detection/models/backbone/Res2Net_v1b.py": 1693424429.718643, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/salient_detection/models/u2net.py": 1693424429.717643, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/salient_detection/models/senet.py": 1693424429.717643, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_reid_person/pass_model.py": 1693424429.6146362, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_reid_person/transreid_model.py": 1693424429.6146362, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/table_recognition/model_lore.py": 1693424429.7366443, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/table_recognition/lineless_table_process.py": 1693424429.735644, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/table_recognition/modules/lore_detector.py": 1693424429.7366443, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/table_recognition/modules/lore_processor.py": 1693424429.7376442, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/track/mask_hungarian_assigner.py": 1693424429.7946482, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/track/kernel_update_head.py": 1693424429.793648, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/utils.py": 1693424429.7896478, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/neck/msdeformattn_decoder.py": 1693424429.793648, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/video_knet.py": 1693424429.7896478, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/head/kernel_iter_head.py": 1693424429.791648, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/head/kernel_updator.py": 1693424429.7926478, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/head/kernel_head.py": 1693424429.7906477, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/head/kernel_update_head.py": 1693424429.791648, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/video_instance_segmentation/head/kernel_frame_iter_head.py": 1693424429.7906477, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/animal_recognition/splat.py": 1693424429.4606261, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/animal_recognition/resnet.py": 1693424429.459626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_driving_perception/utils.py": 1693424429.566633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_driving_perception/preprocessor.py": 1693424429.566633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_driving_perception/image_driving_percetion_model.py": 1693424429.566633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vidt/fpn_fusion.py": 1693424429.8346508, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vidt/deformable_transformer.py": 1693424429.8346508, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vidt/model.py": 1693424429.8356507, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vidt/head.py": 1693424429.8356507, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/vidt/backbone.py": 1693424429.8336506, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_debanding/rrdb/rrdb_image_debanding.py": 1693424429.552632, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/language_guided_video_summarization/summarizer.py": 1693424429.6476383, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/language_guided_video_summarization/transformer/modules.py": 1693424429.6486385, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/language_guided_video_summarization/transformer/sub_layers.py": 1693424429.6486385, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/language_guided_video_summarization/transformer/models.py": 1693424429.6486385, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/language_guided_video_summarization/transformer/layers.py": 1693424429.6476383, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/fastinst_model.py": 1693424429.5836341, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/maskdino_swin.py": 1693424429.5836341, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/maskdino_model.py": 1693424429.5836341, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/backbones/swin_transformer.py": 1693424429.5856342, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/backbones/resnet.py": 1693424429.5856342, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/model.py": 1693424429.5846343, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/postprocess_utils.py": 1693424429.5846343, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/cascade_mask_rcnn_swin.py": 1693424429.5826342, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/datasets/transforms.py": 1693424429.5866344, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/fastinst/fastinst_decoder.py": 1693424429.5876343, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/fastinst/fastinst_encoder.py": 1693424429.5876343, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/maskdino/utils.py": 1693424429.5906346, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/maskdino/ms_deform_attn.py": 1693424429.5896347, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/maskdino/position_encoding.py": 1693424429.5896347, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/maskdino/dino_decoder.py": 1693424429.5886345, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/maskdino/maskdino_encoder.py": 1693424429.5896347, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_instance_segmentation/maskdino/maskdino_decoder.py": 1693424429.5886345, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/action_recognition/tada_convnext.py": 1693424429.458626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/action_recognition/s3dg.py": 1693424429.4576259, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/action_recognition/models.py": 1693424429.4576259, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/action_recognition/temporal_patch_shift_transformer.py": 1693424429.458626, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_recognition/align_face.py": 1693424429.5146296, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_recognition/torchkit/backbone/facemask_backbone.py": 1693424429.5166297, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_recognition/torchkit/backbone/arcface_backbone.py": 1693424429.5166297, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_recognition/torchkit/backbone/model_resnet.py": 1693424429.51963, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_recognition/torchkit/backbone/model_irse.py": 1693424429.51963, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_recognition/torchkit/backbone/common.py": 1693424429.5166297, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/face_recognition/torchkit/rts_backbone.py": 1693424429.5156298, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/action_detection/action_detection_onnx.py": 1693424429.4556258, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/action_detection/modules/action_detection_pytorch.py": 1693424429.456626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/action_detection/modules/resnet.py": 1693424429.456626, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/depe_detect.py": 1693424429.6766403, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/result_vis.py": 1693424429.6766403, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/core/bbox/coders/nms_free_coder.py": 1693424429.6796405, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/core/bbox/match_costs/match_cost.py": 1693424429.6806405, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/core/bbox/assigners/hungarian_assigner_3d.py": 1693424429.6796405, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/core/bbox/util.py": 1693424429.6786404, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/datasets/pipelines/transform_3d.py": 1693424429.6826408, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/datasets/pipelines/loading.py": 1693424429.6816406, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/datasets/nuscenes_dataset.py": 1693424429.6816406, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/detectors/petr3d.py": 1693424429.6856408, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/backbones/vovnet.py": 1693424429.6836407, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/necks/cp_fpn.py": 1693424429.686641, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/dense_heads/depth_net.py": 1693424429.684641, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/dense_heads/petrv2_dednhead.py": 1693424429.684641, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/utils/positional_encoding.py": 
1693424429.687641, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/object_detection_3d/depe/mmdet3d_plugin/models/utils/petr_transformer.py": 1693424429.687641, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/model.py": 1693424429.7076423, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/mttr.py": 1693424429.7096424, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/position_encoding_2d.py": 1693424429.7106426, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/criterion.py": 1693424429.7086425, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/swin_transformer.py": 1693424429.7116425, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/segmentation.py": 1693424429.7106426, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/matcher.py": 1693424429.7086425, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/multimodal_transformer.py": 1693424429.7096424, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/misc.py": 1693424429.7086425, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/backbone.py": 1693424429.7076423, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/referring_video_object_segmentation/utils/postprocessing.py": 1693424429.7106426, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation/newcrfs_model.py": 1693424429.5616326, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation/networks/newcrf_utils.py": 1693424429.5626328, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation/networks/newcrf_layers.py": 1693424429.5626328, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation/networks/newcrf_depth.py": 1693424429.5616326, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation/networks/swin_transformer.py": 1693424429.5626328, "/usr/local/lib/python3.10/dist-packages/modelscope/models/cv/image_depth_estimation/networks/uper_crf_head.py": 1693424429.563633, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/stable_diffusion/stable_diffusion.py": 1693424429.8896544, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/configuration_ofa.py": 1693424429.8766534, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/tokenization_ofa.py": 1693424429.8796537, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/configuration_mmspeech.py": 1693424429.8766534, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/tokenization_ofa_fast.py": 1693424429.8796537, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/utils/utils.py": 1693424429.884654, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/utils/constant.py": 1693424429.884654, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/modeling_ofa.py": 1693424429.8786535, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/resnet.py": 
1693424429.8786535, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/vit.py": 1693424429.8796537, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/modeling_mmspeech.py": 1693424429.8776536, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/generate/utils.py": 1693424429.8836539, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/generate/ngram_repeat_block.py": 1693424429.8816538, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/generate/search.py": 1693424429.882654, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/generate/incremental_decoding_utils.py": 1693424429.8806536, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/generate/multihead_attention.py": 1693424429.8816538, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/generate/sequence_generator.py": 1693424429.882654, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa/generate/token_generation_constraints.py": 1693424429.8836539, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/dpm_solver_pytorch.py": 1693424429.8436513, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug_owl/modeling_mplug_owl.py": 1693424429.8726532, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug_owl/configuration_mplug_owl.py": 1693424429.871653, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/clip/bert_tokenizer.py": 1693424429.8456514, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/clip/modeling_bert.py": 1693424429.8476515, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/clip/model.py": 1693424429.8466516, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/clip/configuration_bert.py": 1693424429.8466516, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/unet_sd.py": 1693424429.9016552, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/data/transforms.py": 1693424429.9066553, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/data/samplers.py": 1693424429.9056554, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/data/tokenizers.py": 1693424429.9056554, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/ops/utils.py": 1693424429.9106557, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/ops/degration.py": 1693424429.9086556, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/ops/losses.py": 1693424429.9096556, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/ops/distributed.py": 1693424429.9096556, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/ops/random_mask.py": 1693424429.9096556, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/dpm_solver.py": 1693424429.900655, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/clip.py": 1693424429.898655, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/autoencoder.py": 1693424429.898655, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/config.py": 1693424429.898655, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/mha_flash.py": 1693424429.900655, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/annotator/histogram/palette.py": 1693424429.9036553, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/annotator/util.py": 1693424429.9026551, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/annotator/sketch/pidinet.py": 1693424429.9046552, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/annotator/sketch/sketch_simplification.py": 1693424429.9046552, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/utils/utils.py": 1693424429.9116557, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/utils/config.py": 1693424429.9116557, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/utils/distributed.py": 1693424429.9116557, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/videocomposer_model.py": 1693424429.9016552, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/models/midas.py": 1693424429.9076555, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/models/clip.py": 1693424429.9076555, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/videocomposer/diffusion.py": 1693424429.8996549, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/team/utils.py": 1693424429.8906543, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/team/team_model.py": 1693424429.8896544, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/diffusion/unet_upsampler_256.py": 1693424429.851652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/diffusion/structbert.py": 1693424429.8496516, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/diffusion/model.py": 1693424429.8496516, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/diffusion/unet_generator.py": 1693424429.8506517, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/diffusion/unet_upsampler_1024.py": 1693424429.851652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/diffusion/diffusion.py": 1693424429.8486516, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/diffusion/tokenizer.py": 1693424429.8506517, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/text_ranking.py": 1693424429.8626525, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/text_classification.py": 1693424429.8626525, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/token_classification.py": 1693424429.8626525, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mgeo/backbone.py": 1693424429.8616524, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/image_to_video_model.py": 1693424429.856652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/utils/config.py": 1693424429.8596523, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/utils/transforms.py": 1693424429.8606524, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/utils/shedule.py": 
1693424429.8606524, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/utils/seed.py": 1693424429.8596523, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/utils/diffusion.py": 1693424429.8596523, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/modules/unet_i2v.py": 1693424429.8586524, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/modules/autoencoder.py": 1693424429.8576522, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/image_to_video/modules/embedder.py": 1693424429.8576522, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/soonet/utils.py": 1693424429.8886542, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/soonet/clip.py": 1693424429.8866541, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/soonet/blocks.py": 1693424429.8866541, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/soonet/model.py": 1693424429.887654, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/soonet/swin_transformer.py": 1693424429.887654, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/soonet/tokenizer.py": 1693424429.887654, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/efficient_diffusion_tuning/efficient_stable_diffusion.py": 1693424429.8526518, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_synthesis/unet_sd.py": 1693424429.8926544, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_synthesis/autoencoder.py": 1693424429.8906543, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_synthesis/text_to_video_synthesis_model.py": 1693424429.8916545, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_synthesis/diffusion.py": 1693424429.8916545, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug/clip/clip.py": 1693424429.8706532, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug/modeling_mplug.py": 1693424429.8686528, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug/predictor.py": 1693424429.869653, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug/mvit.py": 1693424429.869653, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug/configuration_mplug.py": 1693424429.867653, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_all_tasks.py": 1693424429.8446515, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/guided_diffusion/script.py": 1693424429.855652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/guided_diffusion/respace.py": 1693424429.855652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/guided_diffusion/unet.py": 1693424429.855652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/guided_diffusion/gaussian_diffusion.py": 1693424429.854652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/video_to_video_model.py": 1693424429.8936546, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/utils/solvers_sdedit.py": 1693424429.8976548, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/utils/config.py": 1693424429.8956547, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/utils/transforms.py": 1693424429.8976548, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/utils/seed.py": 1693424429.8966548, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/utils/schedules_sdedit.py": 1693424429.8966548, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/utils/diffusion_sdedit.py": 1693424429.8966548, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/modules/autoencoder.py": 1693424429.8946548, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/modules/embedder.py": 1693424429.8946548, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/video_to_video/modules/unet_v2v.py": 1693424429.8946548, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/ofa_for_text_to_image_synthesis_model.py": 1693424429.8456514, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/vldoc/conv_fpn_trans.py": 1693424429.9126558, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/vldoc/processing.py": 1693424429.914656, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/vldoc/transformer_local.py": 1693424429.914656, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/vldoc/model.py": 1693424429.9126558, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/vldoc/tokenization.py": 1693424429.914656, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/vldoc/convnext.py": 1693424429.9126558, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/vldoc/modeling_layout_roberta.py": 1693424429.913656, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/rleg/rleg.py": 1693424429.885654, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/rleg/model.py": 1693424429.885654, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mmr/dataloaders/rawvideo_util.py": 1693424429.8636527, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mmr/models/tokenization_clip.py": 1693424429.8666527, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mmr/models/module_cross.py": 1693424429.8666527, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mmr/models/modeling.py": 1693424429.8656528, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mmr/models/module_clip.py": 1693424429.8656528, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mmr/models/dynamic_inverted_softmax.py": 1693424429.8646526, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mmr/models/until_module.py": 1693424429.8666527, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py": 1693424429.8646526, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/multi_stage_diffusion/clip.py": 1693424429.8726532, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/multi_stage_diffusion/model.py": 1693424429.8736532, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/multi_stage_diffusion/xglm.py": 1693424429.8756533, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/multi_stage_diffusion/prior.py": 1693424429.8746533, 
"/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/multi_stage_diffusion/upsampler.py": 1693424429.8756533, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/multi_stage_diffusion/gaussian_diffusion.py": 1693424429.8736532, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/multi_stage_diffusion/decoder.py": 1693424429.8736532, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/multi_stage_diffusion/tokenizer.py": 1693424429.8746533, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/mplug_for_all_tasks.py": 1693424429.8446515, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/clip_interrogator/model.py": 1693424429.8486516, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/gemm/gemm_base.py": 1693424429.853652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/gemm/gemm_model.py": 1693424429.853652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/multi_modal/gemm/tokenizer.py": 1693424429.853652, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/aec/network/loss.py": 1693424429.429624, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/aec/network/modulation_loss.py": 1693424429.430624, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/aec/network/se_net.py": 1693424429.430624, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/aec/layers/activations.py": 1693424429.427624, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/aec/layers/layer_base.py": 1693424429.428624, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/aec/layers/uni_deep_fsmn.py": 1693424429.429624, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/aec/layers/deep_fsmn.py": 1693424429.428624, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/aec/layers/affine_transform.py": 1693424429.427624, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/separation/mossformer_conv_module.py": 1693424429.443625, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/separation/mossformer_block.py": 1693424429.443625, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/separation/mossformer.py": 1693424429.4426248, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/separation/layer_norm.py": 1693424429.4426248, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/asr/generic_automatic_speech_recognition.py": 1693424429.4356244, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/asr/wenet_automatic_speech_recognition.py": 1693424429.4356244, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/tts/sambert_hifi.py": 1693424429.4496253, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/tts/voice.py": 1693424429.4506254, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/nearfield/model.py": 1693424429.4416249, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/nearfield/fsmn.py": 1693424429.4406247, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/nearfield/cmvn.py": 1693424429.4406247, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/farfield/model.py": 1693424429.4396248, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/farfield/fsmn_sele_v3.py": 1693424429.4396248, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/farfield/fsmn.py": 
1693424429.4386246, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/farfield/model_def.py": 1693424429.4396248, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/farfield/fsmn_sele_v2.py": 1693424429.4386246, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/kws/generic_key_word_spotting.py": 1693424429.4366245, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/itn/generic_inverse_text_processing.py": 1693424429.4366245, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/conv_stft.py": 1693424429.4316242, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/complex_nn.py": 1693424429.4316242, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/layers/activations.py": 1693424429.4336243, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/layers/layer_base.py": 1693424429.4346244, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/layers/uni_deep_fsmn.py": 1693424429.4346244, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/layers/affine_transform.py": 1693424429.4336243, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/denoise_net.py": 1693424429.4316242, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/frcrn.py": 1693424429.4326243, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/unet.py": 1693424429.4326243, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/ans/se_module_complex.py": 1693424429.4326243, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_diarization_semantic_speaker_turn_detection.py": 1693424429.4486253, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/pooling_layers.py": 1693424429.4476252, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/DTDNN_layers.py": 1693424429.4446251, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/ecapa_tdnn.py": 1693424429.445625, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/fusion.py": 1693424429.4466252, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/generic_speaker_verification.py": 1693424429.4466252, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/ERes2Net_aug.py": 1693424429.4446251, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_diarization_dialogue_detection.py": 1693424429.4486253, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/DTDNN.py": 1693424429.443625, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/ERes2Net.py": 1693424429.4446251, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/speaker_change_locator.py": 1693424429.4486253, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/lanuage_recognition_model.py": 1693424429.4466252, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/cluster_backend.py": 1693424429.445625, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/sv/rdino.py": 1693424429.4476252, "/usr/local/lib/python3.10/dist-packages/modelscope/models/audio/punc/generic_punctuation.py": 1693424429.4416249, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/movie_scene_segmentation_metric.py": 1693424429.4186232, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_denoise_metric.py": 1693424429.416623, 
"/usr/local/lib/python3.10/dist-packages/modelscope/metrics/bleu_metric.py": 1693424429.414623, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/ppl_metric.py": 1693424429.4196234, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/text_generation_metric.py": 1693424429.4216235, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_inpainting_metric.py": 1693424429.416623, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_colorization_metric.py": 1693424429.4156232, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/map_metric.py": 1693424429.4186232, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/ocr_recognition_metric.py": 1693424429.4196234, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_frame_interpolation_metric.py": 1693424429.4226236, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_summarization_metric.py": 1693424429.4236236, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/base.py": 1693424429.414623, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/token_classification_metric.py": 1693424429.4216235, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_color_enhance_metric.py": 1693424429.4156232, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_stabilization_metric.py": 1693424429.4226236, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/inbatch_recall_metric.py": 1693424429.4186232, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/prediction_saving_wrapper.py": 1693424429.4206235, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/ciderD/ciderD.py": 1693424429.4236236, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/ciderD/ciderD_scorer.py": 1693424429.4246237, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_quality_assessment_mos_metric.py": 1693424429.4176233, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_quality_assessment_degradation_metric.py": 1693424429.4176233, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_instance_segmentation_metric.py": 1693424429.416623, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/text_ranking_metric.py": 1693424429.4216235, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/sequence_classification_metric.py": 1693424429.4206235, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/image_portrait_enhancement_metric.py": 1693424429.4176233, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/referring_video_object_segmentation_metric.py": 1693424429.4206235, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/builder.py": 1693424429.414623, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/translation_evaluation_metric.py": 1693424429.4226236, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/audio_noise_metric.py": 1693424429.414623, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/loss_metric.py": 1693424429.4186232, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/ned_metric.py": 1693424429.4196234, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_super_resolution_metric/niqe.py": 1693424429.425624, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_super_resolution_metric/video_super_resolution_metric.py": 1693424429.425624, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_super_resolution_metric/matlab_functions.py": 1693424429.4246237, 
"/usr/local/lib/python3.10/dist-packages/modelscope/metrics/video_super_resolution_metric/metric_util.py": 1693424429.425624, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/accuracy_metric.py": 1693424429.413623, "/usr/local/lib/python3.10/dist-packages/modelscope/metrics/action_detection_evaluator.py": 1693424429.413623, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/table_question_answering_pipeline.py": 1693424430.1656725, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/machine_reading_comprehension_pipeline.py": 1693424430.1626723, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/token_classification_pipeline.py": 1693424430.1676726, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/fasttext_text_classification_pipeline.py": 1693424430.159672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/user_satisfaction_estimation_pipeline.py": 1693424430.1686726, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/dialog_intent_prediction_pipeline.py": 1693424430.1556718, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/document_grounded_dialog_retrieval_pipeline.py": 1693424430.158672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/word_alignment_pipeline.py": 1693424430.1686726, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/language_identification_pipline.py": 1693424430.1626723, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/named_entity_recognition_pipeline.py": 1693424430.1636724, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/information_extraction_pipeline.py": 1693424430.161672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/siamese_uie_pipeline.py": 1693424430.1646724, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/translation_pipeline.py": 1693424430.1676726, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/distributed_gpt_moe_pipeline.py": 1693424430.1566718, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/mglm_text_summarization_pipeline.py": 1693424430.1636724, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/interactive_translation_pipeline.py": 1693424430.161672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/translation_evaluation_pipeline.py": 1693424430.1676726, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/document_grounded_dialog_rerank_pipeline.py": 1693424430.157672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_generation_pipeline.py": 1693424430.1666725, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/sentence_embedding_pipeline.py": 1693424430.1646724, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_error_correction_pipeline.py": 1693424430.1666725, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/faq_question_answering_pipeline.py": 1693424430.159672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/document_grounded_dialog_generate_pipeline.py": 1693424430.157672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_classification_pipeline.py": 1693424430.1656725, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/distributed_gpt3_pipeline.py": 1693424430.1566718, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/fid_dialogue_pipeline.py": 1693424430.1606722, 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/conversational_text_to_sql_pipeline.py": 1693424430.1546717, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/dialog_modeling_pipeline.py": 1693424430.1556718, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/document_segmentation_pipeline.py": 1693424430.158672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/codegeex_code_translation_pipeline.py": 1693424430.1546717, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/translation_quality_estimation_pipeline.py": 1693424430.1686726, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/glm130b_text_generation_pipeline.py": 1693424430.1606722, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/text_ranking_pipeline.py": 1693424430.1666725, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/distributed_plug_pipeline.py": 1693424430.1566718, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/zero_shot_classification_pipeline.py": 1693424430.1696727, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/canmt_translation_pipeline.py": 1693424430.1536717, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/feature_extraction_pipeline.py": 1693424430.159672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/automatic_post_editing_pipeline.py": 1693424430.1536717, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/codegeex_code_generation_pipeline.py": 1693424430.1546717, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/polylm_text_generation_pipeline.py": 1693424430.1636724, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/llama2_text_generation_pipeline.py": 1693424430.1626723, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/fill_mask_pipeline.py": 1693424430.1606722, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/dialog_state_tracking_pipeline.py": 1693424430.1556718, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/summarization_pipeline.py": 1693424430.1646724, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/extractive_summarization_pipeline.py": 1693424430.158672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/nlp/word_segmentation_pipeline.py": 1693424430.1696727, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/base.py": 1693424430.0776668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/pipeline_template.py": 1693424430.0786667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/science/protein_structure_pipeline.py": 1693424430.170673, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/builder.py": 1693424430.0786667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vision_middleware_pipeline.py": 1693424430.1336703, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/shop_segmentation_pipleline.py": 1693424430.1246698, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/referring_video_object_segmentation_pipeline.py": 1693424430.1236696, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/tbs_detection_pipeline.py": 1693424430.1256697, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_single_object_tracking_pipeline.py": 1693424430.1316702, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_liveness_xc_pipeline.py": 
1693424430.097668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_salient_detection_pipeline.py": 1693424430.112669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_driving_perception_pipeline.py": 1693424430.1066685, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_denoise_pipeline.py": 1693424430.1056685, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/mtcnn_face_detection_pipeline.py": 1693424430.1186693, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_classification_pipeline.py": 1693424430.1036685, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_depth_estimation_pipeline.py": 1693424430.12867, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_panoptic_segmentation_pipeline.py": 1693424430.1096687, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/product_retrieval_embedding_pipeline.py": 1693424430.1226697, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_debanding_pipeline.py": 1693424430.1046684, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/tinynas_classification_pipeline.py": 1693424430.12667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_portrait_enhancement_pipeline.py": 1693424430.110669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/maskdino_instance_segmentation_pipeline.py": 1693424430.1176693, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_depth_estimation_pipeline.py": 1693424430.1056685, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_colorization_pipeline.py": 1693424430.1036685, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_quality_assessment_degradation_pipeline.py": 1693424430.110669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/movie_scene_segmentation_pipeline.py": 1693424430.1186693, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/nerf_recon_4k_pipeline.py": 1693424430.1196694, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_inpainting_pipeline.py": 1693424430.1076686, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_style_transfer_pipeline.py": 1693424430.1136692, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/resnet_utils.py": 1693424430.1386707, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/model_resnet_mutex_v4_linewithchar.py": 1693424430.1366706, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/utils.py": 1693424430.1386707, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/model_vlpt.py": 1693424430.1366706, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/ocr_modules/vitstr.py": 1693424430.1406708, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/ocr_modules/timm_tinyc.py": 1693424430.1396708, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/ocr_modules/convnext.py": 1693424430.1396708, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/model_resnet18_half.py": 1693424430.1356704, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/model_convnext_transformer.py": 1693424430.1356704, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/resnet18_v1.py": 1693424430.1376705, 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/model_dla34.py": 1693424430.1356704, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/table_process.py": 1693424430.1386707, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_utils/ops.py": 1693424430.1376705, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/realtime_video_object_detection_pipeline.py": 1693424430.1236696, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/nerf_recon_acc_pipeline.py": 1693424430.1196694, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_attribute_recognition_pipeline.py": 1693424430.0956678, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/card_detection_pipeline.py": 1693424430.0926676, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/arc_face_recognition_pipeline.py": 1693424430.0906675, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vidt_pipeline.py": 1693424430.1326704, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_semantic_segmentation_pipeline.py": 1693424430.112669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/language_guided_video_summarization_pipeline.py": 1693424430.1156693, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_super_resolution_pipeline.py": 1693424430.1326704, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_emotion_pipeline.py": 1693424430.096668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_detection_pipeline.py": 1693424430.0956678, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_instance_segmentation_pipeline.py": 1693424430.13067, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_matting_pipeline.py": 1693424430.1086688, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_inpainting_pipeline.py": 1693424430.1296701, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/table_recognition_pipeline.py": 1693424430.1256697, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_quality_assessment_mos_pipeline.py": 1693424430.1116688, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/nerf_recon_vq_compression_pipeline.py": 1693424430.1196694, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_human_parsing_pipeline.py": 1693424430.1066685, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_body_reshaping_pipeline.py": 1693424430.1026683, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/indoor_layout_estimation_pipeline.py": 1693424430.114669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_colorization_pipeline.py": 1693424430.12767, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_paintbyexample_pipeline.py": 1693424430.1096687, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/bad_image_detecting_pipeline.py": 1693424430.0906675, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/license_plate_detection_pipeline.py": 1693424430.1156693, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ddpm_semantic_segmentation_pipeline.py": 1693424430.0956678, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ddcolor_image_colorization_pipeline.py": 1693424430.0946677, 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_cartoon_pipeline.py": 1693424430.1026683, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_summarization_pipeline.py": 1693424430.1326704, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_mvs_depth_estimation_pipeline.py": 1693424430.1086688, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/pedestrian_attribute_recognition_pipeline.py": 1693424430.1216695, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/mask_face_recognition_pipeline.py": 1693424430.1166692, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_reconstruction_pipeline.py": 1693424430.099668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_recognition_ood_pipeline.py": 1693424430.098668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_super_resolution_pipeline.py": 1693424430.1136692, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/facial_landmark_confidence_pipeline.py": 1693424430.1006682, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_defrcn_fewshot_pipeline.py": 1693424430.1046684, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/content_check_pipeline.py": 1693424430.0936677, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_processing_base_pipeline.py": 1693424430.097668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/motion_generation_pipeline.py": 1693424430.1176693, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/action_detection_pipeline.py": 1693424430.0896676, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_try_on_pipeline.py": 1693424430.114669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_frame_interpolation_pipeline.py": 1693424430.12867, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_detection_pipeline.py": 1693424430.1206696, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_stabilization_pipeline.py": 1693424430.1316702, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/virtual_try_on_pipeline.py": 1693424430.1336703, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vision_efficient_tuning_pipeline.py": 1693424430.1336703, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/action_recognition_pipeline.py": 1693424430.0896676, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/text_to_360panorama_image_pipeline.py": 1693424430.12667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/lineless_table_recognition_pipeline.py": 1693424430.1166692, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_color_enhance_pipeline.py": 1693424430.1036685, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_structured_model_probing_pipeline.py": 1693424430.112669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_restoration_pipeline.py": 1693424430.1116688, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/crowd_counting_pipeline.py": 1693424430.0946677, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/panorama_depth_estimation_s2net_pipeline.py": 1693424430.1216695, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/object_detection_3d_pipeline.py": 1693424430.1206696, 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_inpainting_sdv2_pipeline.py": 1693424430.1076686, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/panorama_depth_estimation_pipeline.py": 1693424430.1216695, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_instance_segmentation_pipeline.py": 1693424430.1076686, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/pointcloud_sceneflow_estimation_pipeline.py": 1693424430.1226697, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vop_retrieval_se_pipeline.py": 1693424430.1346705, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_face_fusion_pipeline.py": 1693424430.1066685, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/mog_face_detection_pipeline.py": 1693424430.1176693, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/general_recognition_pipeline.py": 1693424430.1006682, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/animal_recognition_pipeline.py": 1693424430.0906675, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_object_segmentation_pipeline.py": 1693424430.13067, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/text_driven_segmentation_pipleline.py": 1693424430.1256697, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/mobile_image_super_resolution_pipeline.py": 1693424430.1176693, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_quality_assessment_man_pipeline.py": 1693424430.110669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_multi_object_tracking_pipeline.py": 1693424430.13067, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/live_category_pipeline.py": 1693424430.1166692, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_human_matting_pipeline.py": 1693424430.1296701, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_recognition_pipeline.py": 1693424430.099668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_skychange_pipeline.py": 1693424430.112669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_open_vocabulary_detection_pipeline.py": 1693424430.1096687, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/cmdssl_video_embedding_pipeline.py": 1693424430.0936677, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/human_reconstruction_pipeline.py": 1693424430.1016684, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/skin_retouching_pipeline.py": 1693424430.1246698, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_to_image_generate_pipeline.py": 1693424430.1136692, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_human_hand_detection_pipeline.py": 1693424430.096668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_image_generation_pipeline.py": 1693424430.096668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/hicossl_video_embedding_pipeline.py": 1693424430.1016684, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/fast_instance_segmentation_pipeline.py": 1693424430.1006682, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_detection_pipeline.py": 1693424430.1056685, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/product_segmentation_pipeline.py": 1693424430.1236696, 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/tbs_detection_utils/utils.py": 1693424430.1406708, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_matching_pipeline.py": 1693424430.1086688, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/retina_face_detection_pipeline.py": 1693424430.1246698, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_panoptic_segmentation_pipeline.py": 1693424430.1316702, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/tinynas_detection_pipeline.py": 1693424430.12667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_bts_depth_estimation_pipeline.py": 1693424430.1026683, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/facial_expression_recognition_pipeline.py": 1693424430.099668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/vop_retrieval_pipeline.py": 1693424430.1346705, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_category_pipeline.py": 1693424430.12767, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_deblur_pipeline.py": 1693424430.1046684, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_recognition_onnx_ir_pipeline.py": 1693424430.098668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/hand_static_pipeline.py": 1693424430.1016684, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_reid_person_pipeline.py": 1693424430.1116688, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ocr_recognition_pipeline.py": 1693424430.1206696, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_quality_assessment_pipeline.py": 1693424430.097668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/body_3d_keypoints_pipeline.py": 1693424430.0926676, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_recognition_onnx_fm_pipeline.py": 1693424430.098668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/controllable_image_generation_pipeline.py": 1693424430.0946677, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/body_2d_keypoints_pipeline.py": 1693424430.0926676, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/ulfd_face_detection_pipeline.py": 1693424430.12767, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/image_to_image_translation_pipeline.py": 1693424430.114669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/face_liveness_ir_pipeline.py": 1693424430.096668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/cv/video_deinterlace_pipeline.py": 1693424430.12867, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/util.py": 1693424430.0786667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/video_to_video_pipeline.py": 1693424430.1486714, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/text2sql_pipeline.py": 1693424430.1466713, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/visual_grounding_pipeline.py": 1693424430.1496713, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/chinese_stable_diffusion_pipeline.py": 1693424430.1516716, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py": 1693424430.1516716, 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/diffusers_wrapped/diffusers_pipeline.py": 1693424430.1506715, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/sudoku_pipeline.py": 1693424430.1456711, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/generative_multi_modal_embedding_pipeline.py": 1693424430.1426709, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/soonet_video_temporal_grounding_pipeline.py": 1693424430.1456711, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/image_to_video_pipeline.py": 1693424430.143671, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/video_question_answering_pipeline.py": 1693424430.1476712, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/visual_entailment_pipeline.py": 1693424430.1496713, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/disco_guided_diffusion_pipeline/utils.py": 1693424430.1526716, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/disco_guided_diffusion_pipeline/disco_guided_diffusion.py": 1693424430.1526716, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/text_to_image_synthesis_pipeline.py": 1693424430.1466713, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/mgeo_ranking_pipeline.py": 1693424430.144671, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/video_multi_modal_embedding_pipeline.py": 1693424430.1476712, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/team_multi_modal_similarity_pipeline.py": 1693424430.1466713, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/multi_modal_embedding_pipeline.py": 1693424430.144671, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/asr_pipeline.py": 1693424430.141671, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/multimodal_dialogue_pipeline.py": 1693424430.144671, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/document_vl_embedding_pipeline.py": 1693424430.141671, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py": 1693424430.1476712, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/image_text_retrieval_pipeline.py": 1693424430.143671, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/efficient_diffusion_tuning_pipeline.py": 1693424430.1426709, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/image_captioning_pipeline.py": 1693424430.143671, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/video_captioning_pipeline.py": 1693424430.1476712, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/visual_question_answering_pipeline.py": 1693424430.1496713, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/gridvlp_pipeline.py": 1693424430.1426709, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/ocr_recognition_pipeline.py": 1693424430.1456711, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/multi_modal/videocomposer_pipeline.py": 1693424430.1486714, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/language_recognition_pipeline.py": 1693424430.082667, 
"/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/text_to_speech_pipeline.py": 1693424430.0886674, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_verification_light_pipeline.py": 1693424430.0876672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_diarization_pipeline.py": 1693424430.0866673, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/lm_infer_pipeline.py": 1693424430.083667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/ans_pipeline.py": 1693424430.0796669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/segmentation_clustering_pipeline.py": 1693424430.0846672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/ans_dfsmn_pipeline.py": 1693424430.0796669, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_diarization_dialogue_detection_pipeline.py": 1693424430.0856671, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/kws_kwsbp_pipeline.py": 1693424430.081667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_diarization_semantic_speaker_turn_detection_pipeline.py": 1693424430.0866673, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/punctuation_processing_pipeline.py": 1693424430.0846672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/kws_farfield_pipeline.py": 1693424430.081667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_verification_rdino_pipeline.py": 1693424430.0876672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/inverse_text_processing_pipeline.py": 1693424430.081667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_verification_eres2net_pipeline.py": 1693424430.0866673, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/asr_wenet_inference_pipeline.py": 1693424430.0806668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/voice_activity_detection_pipeline.py": 1693424430.0886674, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/timestamp_pipeline.py": 1693424430.0886674, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/separation_pipeline.py": 1693424430.0846672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_verification_pipeline.py": 1693424430.0876672, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/asr_inference_pipeline.py": 1693424430.0806668, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/linear_aec_pipeline.py": 1693424430.082667, "/usr/local/lib/python3.10/dist-packages/modelscope/pipelines/audio/speaker_change_locating_pipeline.py": 1693424430.0856671, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/image.py": 1693424430.172673, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_clean.py": 1693424430.1866739, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/canmt_translation.py": 1693424430.1816735, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/document_grounded_dialog_rerank_preprocessor.py": 1693424430.1826737, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/word_alignment_preprocessor.py": 1693424430.190674, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_en/conversational_text_to_sql_preprocessor.py": 1693424430.1996746, 
"/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_en/fields/process_dataset.py": 1693424430.201675, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_en/fields/preprocess_dataset.py": 1693424430.201675, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_en/fields/common_utils.py": 1693424430.2006748, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_en/fields/parse.py": 1693424430.2006748, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/relation_extraction_preprocessor.py": 1693424430.1856737, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/bert_seq_cls_tokenizer.py": 1693424430.1806734, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/sentence_embedding_preprocessor.py": 1693424430.1856737, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_viet_preprocessor.py": 1693424430.188674, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_generation_preprocessor.py": 1693424430.1876738, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/siamese_uie_preprocessor.py": 1693424430.1856737, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/utils.py": 1693424430.1896741, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/dst_processors.py": 1693424430.1936743, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/tensorlistdataset.py": 1693424430.1946743, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/lazy_dataset.py": 1693424430.1936743, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/dialog_modeling_preprocessor.py": 1693424430.1926742, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/dialog_state_tracking_preprocessor.py": 1693424430.1926742, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/preprocess.py": 1693424430.1946743, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/dialog_intent_prediction_preprocessor.py": 1693424430.1916742, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/data_loader.py": 1693424430.1916742, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/fields/intent_field.py": 1693424430.1966746, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/fields/gen_field.py": 1693424430.1966746, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/sampler.py": 1693424430.1946743, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/batch.py": 1693424430.1916742, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/tokenizer.py": 1693424430.1956744, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space/args.py": 1693424430.1916742, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_preprocessor.py": 1693424430.188674, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/mgeo_ranking_preprocessor.py": 1693424430.1846738, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/fill_mask_preprocessor.py": 1693424430.1836736, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/faq_question_answering_preprocessor.py": 1693424430.1836736, 
"/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/translation_evaluation_preprocessor.py": 1693424430.1896741, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_cn/table_question_answering_preprocessor.py": 1693424430.1976745, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_cn/fields/struct.py": 1693424430.1986747, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_cn/fields/database.py": 1693424430.1986747, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/space_T_cn/fields/schema_link.py": 1693424430.1986747, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/machine_reading_comprehension_preprocessor.py": 1693424430.1846738, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/transformers_tokenizer.py": 1693424430.1896741, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/document_grounded_dialog_generate_preprocessor.py": 1693424430.1816735, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/token_classification_thai_preprocessor.py": 1693424430.188674, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/mglm_summarization_preprocessor.py": 1693424430.1846738, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/dialog_classification_use_preprocessor.py": 1693424430.1816735, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/feature_extraction_preprocessor.py": 1693424430.1836736, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/document_grounded_dialog_retrieval_preprocessor.py": 1693424430.1826737, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/document_segmentation_preprocessor.py": 1693424430.1826737, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_classification_preprocessor.py": 1693424430.1866739, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/zero_shot_classification_preprocessor.py": 1693424430.190674, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_error_correction.py": 1693424430.1866739, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/nlp/text_ranking_preprocessor.py": 1693424430.1876738, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/visual_entailment.py": 1693424430.2056751, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/text2sql.py": 1693424430.204675, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/text_classification.py": 1693424430.2056751, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/image_classification.py": 1693424430.203675, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/text_to_image_synthesis.py": 1693424430.2056751, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/ocr_recognition.py": 1693424430.203675, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/base.py": 1693424430.2026749, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/sudoku.py": 1693424430.204675, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/visual_grounding.py": 1693424430.206675, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/image_captioning.py": 1693424430.2026749, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/asr.py": 1693424430.2026749, 
"/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/utils/audio_helper.py": 1693424430.2076752, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/utils/vision_helper.py": 1693424430.2096753, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/utils/collate.py": 1693424430.2076752, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/utils/transforms.py": 1693424430.2096753, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/utils/constant.py": 1693424430.2086754, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/utils/bridge_content_encoder.py": 1693424430.2076752, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/utils/random_help.py": 1693424430.2086754, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/utils/text2phone.py": 1693424430.2096753, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/utils/get_tables.py": 1693424430.2086754, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/visual_question_answering.py": 1693424430.206675, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/ofa/summarization.py": 1693424430.204675, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/base.py": 1693424430.1716728, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/speaker.py": 1693424430.173673, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/science/uni_fold.py": 1693424430.2106755, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/multi_modal.py": 1693424430.173673, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/movie_scene_segmentation/transforms.py": 1693424430.1806734, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/asr.py": 1693424430.1716728, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/audio.py": 1693424430.1716728, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/builder.py": 1693424430.172673, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/common.py": 1693424430.172673, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/video_super_resolution.py": 1693424430.1796734, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_classification_preprocessor.py": 1693424430.1766732, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/bad_image_detecting_preprocessor.py": 1693424430.175673, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/cv2_transforms.py": 1693424430.1766732, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/mmcls_preprocessor.py": 1693424430.1786733, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/timer.py": 1693424430.1786733, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_restoration_preprocessor.py": 1693424430.1776733, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_quality_assessment_mos.py": 1693424430.1776733, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/action_detection_mapper.py": 1693424430.175673, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/util.py": 1693424430.1786733, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/image_quality_assessment_man.py": 1693424430.1776733, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/controllable_image_generation.py": 
1693424430.1766732, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/cv/video_stabilization.py": 1693424430.1796734, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/tts.py": 1693424430.174673, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/kws.py": 1693424430.173673, "/usr/local/lib/python3.10/dist-packages/modelscope/preprocessors/video.py": 1693424430.174673, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/training_args.py": 1693424430.2186759, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/lrscheduler/warmup/base.py": 1693424430.2406774, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/lrscheduler/warmup/warmup.py": 1693424430.2416775, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/lrscheduler/builder.py": 1693424430.2406774, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/sequence_classification_trainer.py": 1693424430.2536783, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/faq_question_answering_trainer.py": 1693424430.2516782, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/text_generation_trainer.py": 1693424430.2546782, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/siamese_uie_trainer.py": 1693424430.2536783, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/table_question_answering_trainer.py": 1693424430.2546782, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/space/dialog_modeling_trainer.py": 1693424430.2566783, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/space/eval.py": 1693424430.2576785, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/space/metrics/metrics_tracker.py": 1693424430.2586787, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/space/trainer/intent_trainer.py": 1693424430.2596786, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/space/trainer/gen_trainer.py": 1693424430.2596786, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/space/dialog_intent_trainer.py": 1693424430.2566783, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/document_grounded_dialog_rerank_trainer.py": 1693424430.250678, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/document_grounded_dialog_retrieval_trainer.py": 1693424430.2516782, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/gpt3_trainer.py": 1693424430.2526782, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/csanmt_translation_trainer.py": 1693424430.250678, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/translation_evaluation_trainer.py": 1693424430.2556784, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/sentence_embedding_trainer.py": 1693424430.2536783, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/document_grounded_dialog_generate_trainer.py": 1693424430.250678, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/plug_trainer.py": 1693424430.2526782, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/text_ranking_trainer.py": 1693424430.2546782, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp/gpt_moe_trainer.py": 1693424430.2526782, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/default_config.py": 1693424430.217676, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/optimizer/builder.py": 1693424430.2606788, 
"/usr/local/lib/python3.10/dist-packages/modelscope/trainers/base.py": 1693424430.2166758, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/nlp_trainer.py": 1693424430.217676, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/parallel/utils.py": 1693424430.2616787, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/parallel/builder.py": 1693424430.2616787, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cli_argument_parser.py": 1693424430.2166758, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/utils/inference.py": 1693424430.2626789, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/utils/log_buffer.py": 1693424430.2626789, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/builder.py": 1693424430.2166758, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/card_detection_scrfd_trainer.py": 1693424430.2246764, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_detection_damoyolo_trainer.py": 1693424430.2266765, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/referring_video_object_segmentation_trainer.py": 1693424430.2296767, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/ocr_detection_db_trainer.py": 1693424430.2296767, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/vision_efficient_tuning_trainer.py": 1693424430.2306767, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_instance_segmentation_trainer.py": 1693424430.2276766, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/ocr_recognition_trainer.py": 1693424430.2296767, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/face_detection_scrfd_trainer.py": 1693424430.2256763, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_portrait_enhancement_trainer.py": 1693424430.2276766, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_defrcn_fewshot_detection_trainer.py": 1693424430.2266765, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/cartoon_translation_trainer.py": 1693424430.2256763, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/movie_scene_segmentation_trainer.py": 1693424430.2286766, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/nerf_recon_acc_trainer.py": 1693424430.2286766, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_inpainting_trainer.py": 1693424430.2276766, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/image_classifition_trainer.py": 1693424430.2256763, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/cv/action_detection_trainer.py": 1693424430.2246764, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/trainer.py": 1693424430.2186759, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/mgeo_ranking_trainer.py": 1693424430.2416775, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/stable_diffusion/stable_diffusion_trainer.py": 1693424430.248678, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/ofa/ofa_trainer.py": 1693424430.2476778, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/ofa/ofa_trainer_utils.py": 1693424430.2476778, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/clip/clip_trainer_utils.py": 1693424430.2436776, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/clip/clip_trainer.py": 1693424430.2426775, 
"/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py": 1693424430.2436776, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/team/team_trainer.py": 1693424430.249678, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/team/team_trainer_utils.py": 1693424430.249678, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py": 1693424430.2446775, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/efficient_diffusion_tuning/efficient_diffusion_tuning_trainer.py": 1693424430.2456777, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/mplug/mplug_trainer.py": 1693424430.2466779, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py": 1693424430.2456777, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/iter_timer_hook.py": 1693424430.232677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/priority.py": 1693424430.232677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/optimizer/base.py": 1693424430.2396774, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/optimizer/torch_optimizer_hook.py": 1693424430.2396774, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/optimizer/apex_optimizer_hook.py": 1693424430.2396774, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/compression/utils.py": 1693424430.235677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/compression/sparsity_hook.py": 1693424430.235677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/checkpoint/checkpoint_processor.py": 1693424430.234677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py": 1693424430.233677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/checkpoint/load_checkpoint_hook.py": 1693424430.234677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/evaluation_hook.py": 1693424430.2316768, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/clip_clamp_logit_scale_hook.py": 1693424430.2306767, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/early_stop_hook.py": 1693424430.2316768, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/logger/base.py": 1693424430.237677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/logger/tensorboard_hook.py": 1693424430.237677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/logger/text_logger_hook.py": 1693424430.2386773, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/builder.py": 1693424430.2306767, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/lr_scheduler_hook.py": 1693424430.232677, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/distributed/megatron_hook.py": 1693424430.2366772, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/distributed/ddp_hook.py": 1693424430.2366772, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/distributed/deepspeed_hook.py": 1693424430.2366772, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/hooks/hook.py": 1693424430.2316768, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/kws_farfield_trainer.py": 1693424430.2206762, 
"/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/separation_trainer.py": 1693424430.221676, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/asr_trainer.py": 1693424430.219676, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/kws_utils/file_utils.py": 1693424430.2236762, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/kws_utils/batch_utils.py": 1693424430.2226763, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/kws_utils/model_utils.py": 1693424430.2236762, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/kws_utils/runtime_utils.py": 1693424430.2236762, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/kws_utils/det_utils.py": 1693424430.2226763, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/tts_trainer.py": 1693424430.221676, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/ans_trainer.py": 1693424430.219676, "/usr/local/lib/python3.10/dist-packages/modelscope/trainers/audio/kws_nearfield_trainer.py": 1693424430.2206762, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/context/dataset_context_config.py": 1693424430.0286634, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/data_loader/data_loader_manager.py": 1693424430.0306635, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/data_loader/data_loader.py": 1693424430.0306635, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/task_datasets/torch_base_dataset.py": 1693424430.0636659, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/task_datasets/sidd_image_denoising.py": 1693424430.0636659, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/task_datasets/reds_image_deblurring_dataset.py": 1693424430.0626657, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/task_datasets/video_summarization_dataset.py": 1693424430.0636659, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/task_datasets/gopro_image_deblurring_dataset.py": 1693424430.0626657, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/download/download_manager.py": 1693424430.0616655, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/download/download_config.py": 1693424430.0606656, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/download/dataset_builder.py": 1693424430.0606656, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/ms_dataset.py": 1693424430.0266633, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/utils/delete_utils.py": 1693424430.0646658, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/utils/oss_utils.py": 1693424430.065666, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/utils/dataset_utils.py": 1693424430.0646658, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/utils/maxcompute_utils.py": 1693424430.065666, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/utils/upload_utils.py": 1693424430.065666, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/auth/auth_config.py": 1693424430.0286634, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/dataset.py": 1693424430.0316637, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/text_ranking_dataset.py": 1693424430.034664, 
"/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/video_frame_interpolation/video_frame_interpolation_dataset.py": 1693424430.0586655, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/video_frame_interpolation/data_utils.py": 1693424430.0586655, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_portrait_enhancement/image_portrait_enhancement_dataset.py": 1693424430.0466647, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_portrait_enhancement/data_utils.py": 1693424430.0456645, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/build.py": 1693424430.038664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/transforms/build.py": 1693424430.0436645, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/transforms/transforms.py": 1693424430.0436645, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/samplers/distributed.py": 1693424430.0416644, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/samplers/iteration_based_batch_sampler.py": 1693424430.0426643, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/samplers/grouped_batch_sampler.py": 1693424430.0426643, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/datasets/mosaic_wrapper.py": 1693424430.0406642, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/datasets/coco.py": 1693424430.0396643, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/collate_batch.py": 1693424430.038664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/damoyolo/evaluation/coco/coco_eval.py": 1693424430.0416644, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/easycv_base.py": 1693424430.0326638, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/measures/quad_measurer.py": 1693424430.051665, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/measures/iou_evaluator.py": 1693424430.050665, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/data_loader.py": 1693424430.0496647, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/processes/make_seg_detection_data.py": 1693424430.0536652, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/processes/make_border_map.py": 1693424430.052665, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/processes/augment_data.py": 1693424430.051665, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/processes/data_process.py": 1693424430.052665, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/processes/normalize_image.py": 1693424430.054665, 
"/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/processes/make_icdar_data.py": 1693424430.0536652, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/processes/random_crop_data.py": 1693424430.054665, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/augmenter.py": 1693424430.0496647, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_detection/image_dataset.py": 1693424430.050665, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/mgeo_ranking_dataset.py": 1693424430.0336637, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/bad_image_detecting/bad_image_detecting_dataset.py": 1693424430.037664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_colorization/image_colorization_dataset.py": 1693424430.0446644, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_inpainting/aug.py": 1693424430.0446644, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_inpainting/image_inpainting_dataset.py": 1693424430.0456645, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/video_super_resolution/video_super_resolution_dataset.py": 1693424430.0596654, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/reds_image_deblurring_dataset.py": 1693424430.034664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/movie_scene_segmentation/sampler.py": 1693424430.0486648, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/movie_scene_segmentation/movie_scene_segmentation_dataset.py": 1693424430.0486648, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_quality_assessment_degradation/image_quality_assessment_degradation_dataset.py": 1693424430.0466647, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_instance_segmentation_coco_dataset.py": 1693424430.0336637, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/builder.py": 1693424430.0326638, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/sidd_image_denoising/sidd_image_denoising_dataset.py": 1693424430.0576653, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/sidd_image_denoising/transforms.py": 1693424430.0576653, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/sidd_image_denoising/data_utils.py": 1693424430.0566652, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/video_summarization_dataset.py": 1693424430.0356638, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/video_stabilization/video_stabilization_dataset.py": 1693424430.0586655, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/veco_dataset.py": 1693424430.0356638, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/image_quality_assmessment_mos/image_quality_assessment_mos_dataset.py": 1693424430.0476646, 
"/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py": 1693424430.036664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/audio/kws_nearfield_processor.py": 1693424430.037664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/audio/kws_farfield_dataset.py": 1693424430.036664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/audio/kws_nearfield_dataset.py": 1693424430.036664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/gopro_image_deblurring_dataset.py": 1693424430.0326638, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/ocr_recognition_dataset.py": 1693424430.034664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/language_guided_video_summarization_dataset.py": 1693424430.0336637, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/torch_custom_dataset.py": 1693424430.034664, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/referring_video_object_segmentation/referring_video_object_segmentation_dataset.py": 1693424430.0556653, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/dataset_cls/custom_datasets/referring_video_object_segmentation/transformers.py": 1693424430.0556653, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/audio/asr_dataset.py": 1693424430.0276635, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/data_files/data_files_manager.py": 1693424430.0296636, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/meta/data_meta_config.py": 1693424430.0616655, "/usr/local/lib/python3.10/dist-packages/modelscope/msdatasets/meta/data_meta_manager.py": 1693424430.0616655, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/tf_model_exporter.py": 1693424429.4006221, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/model_for_token_classification_exporter.py": 1693424429.4046223, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/csanmt_for_translation_exporter.py": 1693424429.4036224, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_sequence_classification_exporter.py": 1693424429.4046223, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/nlp/sbert_for_zero_shot_classification_exporter.py": 1693424429.4046223, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/base.py": 1693424429.399622, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/torch_model_exporter.py": 1693424429.4006221, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/builder.py": 1693424429.399622, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/cv/face_detection_scrfd_exporter.py": 1693424429.4026222, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/cv/object_detection_damoyolo_exporter.py": 1693424429.4026222, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/cv/cartoon_translation_exporter.py": 1693424429.4016223, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/multi_modal/stable_diffusion_exporter.py": 1693424429.4036224, "/usr/local/lib/python3.10/dist-packages/modelscope/exporters/audio/ans_dfsmn_exporter.py": 1693424429.4016223}, "modelscope_path": "/usr/local/lib/python3.10/dist-packages/modelscope"} \ No 
newline at end of file
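
For reference, the object added by this hunk is plain single-line JSON, so it can be inspected directly. The short sketch below is an illustration only, not part of ModelScope: it loads the cache, prints the recorded `modelscope_path`, and summarizes any nested file-path → mtime maps like the one that makes up the bulk of this hunk. The file name passed to `open()` and the key-agnostic walk over nested dicts are assumptions; only the `"modelscope_path"` field and the path → float-timestamp shape are taken from the dump itself.

```python
# Minimal sketch (assumed usage, not ModelScope's own loader) for inspecting a
# cache file shaped like the one added in this diff: a JSON object whose top
# level holds a "modelscope_path" string plus nested dicts mapping source-file
# paths to mtime floats.
import json
from datetime import datetime, timezone

with open("ast_indexer", "r", encoding="utf-8") as f:  # path is an assumption
    cache = json.load(f)

print("modelscope_path:", cache["modelscope_path"])

# Walk nested {filepath: mtime} maps without assuming their key names,
# since those key names are not visible in this excerpt of the dump.
for key, value in cache.items():
    if isinstance(value, dict) and value and all(
        isinstance(v, float) for v in value.values()
    ):
        newest_path, newest_mtime = max(value.items(), key=lambda kv: kv[1])
        stamp = datetime.fromtimestamp(newest_mtime, tz=timezone.utc)
        print(f"{key}: {len(value)} tracked files, "
              f"newest {newest_path} @ {stamp:%Y-%m-%d %H:%M:%S}Z")
```

The float values such as 1693424430.1656725 are ordinary Unix mtimes, so a query like the one above is enough to see which installed source files the index considers current.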