from sentence_transformers import SentenceTransformer
from mteb import MTEB
from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval
from datasets import DatasetDict
from collections import defaultdict
import pandas as pd

# Thin wrapper: read one of the dataset's parquet files into a DataFrame.
def load_dataset(path):
    return pd.read_parquet(path, engine="pyarrow")

def load_retrieval_data(path):
    eval_split = 'dev'

    # corpus: cid -> {'text': ...}; queries: qid -> text; qrels: qid -> cid -> relevance score
    corpus = {e['cid']: {'text': e['text']} for _, e in load_dataset(path + r'\data\corpus.parquet.gz').iterrows()}
    queries = {e['qid']: e['text'] for _, e in load_dataset(path + r'\data\queries.parquet.gz').iterrows()}
    relevant_docs = defaultdict(dict)
    for _, e in load_dataset(path + r'\data\qrels.parquet.gz').iterrows():
        relevant_docs[e['qid']][e['cid']] = e['score']

    # Key each mapping by the split name, the layout MTEB's retrieval tasks expect.
    corpus = DatasetDict({eval_split: corpus})
    queries = DatasetDict({eval_split: queries})
    relevant_docs = DatasetDict({eval_split: relevant_docs})
    return corpus, queries, relevant_docs

# Environment setup notes (Windows):
# conda install sentence-transformers -c conda-forge
# $env:HF_ENDPOINT="https://hf-mirror.com"; python -c "from huggingface_hub import snapshot_download; snapshot_download(repo_id='DMetaSoul/Dmeta-embedding', local_dir=r'D:\models\Dmeta')"
# pip install pytrec-eval-terrier
# mteb[beir] depends on pytrec-eval, for which pytrec-eval-terrier (installed
# above) is a substitute. To stop pip from trying to build the original anyway,
# edit the _get_updated_criteria method of the Resolution object in
# envs\HelloGPT\lib\site-packages\pip\_vendor\resolvelib\resolvers.py and add
# the 'continue' shown below inside its for loop:
#     def _get_updated_criteria(self, candidate):
#         criteria = self.state.criteria.copy()
#         for requirement in self._p.get_dependencies(candidate=candidate):
#             if 'pytrec-eval' in repr(requirement):
#                 continue
#             self._add_to_criteria(criteria, requirement, parent=candidate)
#         return criteria
# pip install mteb[beir] -i https://pypi.tuna.tsinghua.edu.cn/simple/  # needs the proxy's TUN mode enabled
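# Possibly simpler (an untested assumption, not what was done here): skip pip's
# dependency resolution entirely, so the pytrec-eval requirement is never processed:
#   pip install mteb[beir] --no-deps -i https://pypi.tuna.tsinghua.edu.cn/simple/
# and then install the remaining requirements by hand until the imports below succeed.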

# mteb forces batch_size=128 when calling encode; with limited VRAM the blunt
# fix is to edit the encode method in sentence_transformers' SentenceTransformer.py
# and pin it back down by adding a line `batch_size = 32`.
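# A less invasive alternative (a sketch, assuming MTEB passes batch_size as a
# keyword argument): cap the batch size at runtime instead of editing site-packages.
_orig_encode = SentenceTransformer.encode

def _encode_capped(self, sentences, *args, **kwargs):
    # Clamp whatever batch size the caller requests (mteb asks for 128) down to 32.
    kwargs['batch_size'] = min(kwargs.get('batch_size', 32), 32)
    return _orig_encode(self, sentences, *args, **kwargs)

SentenceTransformer.encode = _encode_capped
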
model = SentenceTransformer(r'D:\models\Dmeta', device='cuda:0')

# Sanity check: with normalize_embeddings=True the embeddings are unit-length,
# so the matrix product below is a 2x4 grid of cosine similarities.
texts1 = ["胡子长得太快怎么办?", "在香港哪里买手表好"]  # "My beard grows too fast, what should I do?" / "Where in Hong Kong is a good place to buy a watch?"
texts2 = ["胡子长得快怎么办?", "怎样使胡子不浓密!", "香港买手表哪里好", "在杭州手机到哪里买"]  # paraphrases of and distractors for the two queries above
embs1 = model.encode(texts1, normalize_embeddings=True)
embs2 = model.encode(texts2, normalize_embeddings=True)
similarity = embs1 @ embs2.T
print(similarity)

class H2Retrieval(AbsTaskRetrieval):
    @property
    def description(self):
        return {
            'name': 'H2Retrieval',
            'hf_hub_name': 'Limour/H2Retrieval',
            'reference': 'https://huggingface.co/datasets/a686d380/h-corpus-2023',
            'description': 'A retrieval evaluation dataset for the h-corpus domain.',
            'type': 'Retrieval',
            'category': 's2p',
            'eval_splits': ['dev'],
            'eval_langs': ['zh'],
            'main_score': 'ndcg_at_10'
        }

    def load_data(self, **kwargs):
        if self.data_loaded:
            return

        self.corpus, self.queries, self.relevant_docs = load_retrieval_data(r'D:\datasets\H2Retrieval')
        self.data_loaded = True

evaluation = MTEB(tasks=[H2Retrieval()])
evaluation.run(model)
# torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 25.34 GiB.
# Raised from sentence_transformers.util.cos_sim:
#     return torch.mm(a_norm, b_norm.transpose(0, 1)) #TODO: this keeps allocating GPU memory
# Unbelievable: the slowest part ran to completion, and then this step has me stuck.
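
# A possible workaround (a sketch, not verified against every
# sentence-transformers/beir version): swap util.cos_sim for a chunked variant
# so the full query x corpus score matrix never sits on the GPU at once. Note
# that beir binds cos_sim at import time, so on some versions the reference
# inside its DenseRetrievalExactSearch has to be patched instead.
import torch
import sentence_transformers.util as st_util

def cos_sim_chunked(a, b, chunk_size=2048):
    a, b = torch.as_tensor(a), torch.as_tensor(b)
    if a.dim() == 1:
        a = a.unsqueeze(0)
    if b.dim() == 1:
        b = b.unsqueeze(0)
    a_norm = torch.nn.functional.normalize(a, p=2, dim=1)
    b_norm = torch.nn.functional.normalize(b, p=2, dim=1)
    chunks = []
    for start in range(0, a_norm.shape[0], chunk_size):
        # Score one slice of queries, then move the result off the GPU right away.
        chunks.append(torch.mm(a_norm[start:start + chunk_size], b_norm.T).cpu())
    return torch.cat(chunks, dim=0)

st_util.cos_sim = cos_sim_chunked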