# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TREC2014 Dataset"""
import os
from pathlib import Path
import numpy as np
import scipy.sparse as sp
import argparse
import json
import random
from scipy.sparse import csr_matrix

import mindspore.context as context
from mindspore import Tensor, nn
import mindspore as ms

from mindspore_gl.graph import MindHomoGraph, CsrAdj
from .base_dataset import BaseDataSet

from utils import load_dict


class TREC(BaseDataSet):
    """TREC2014 session/click dataset.

    Builds query-query (qid) and document-document (uid) relation graphs
    from the preprocessed TREC2014 session files and loads the per-session
    click records for the split selected by ``args.mode``
    ('train', 'valid' or 'test').
    """

    def __init__(self, root, args):
        """Initialize the dataset and immediately run preprocessing.

        Args:
            root: directory containing the preprocessed TREC2014 files
                (``{train,valid,test}_per_query_quid.txt`` plus the
                ``query_qid.dict`` / ``url_uid.dict`` id dictionaries).
            args: parsed CLI arguments; ``mode``, ``max_d_num`` and
                ``output`` are read here and in ``_preprocess``.

        Raises:
            Exception: if ``root`` does not exist.
        """
        self._root = Path(root)

        # Placeholders for CSR topology / features / masks; not populated
        # in this file (presumably used by BaseDataSet machinery — verify).
        self._csr_row = None
        self._csr_col = None
        self._nodes = None

        self._node_feat = None
        self._node_label = None

        self._train_mask = None
        self._val_mask = None
        self._test_mask = None
        self._npz_file = None

        self.mode = args.mode
        self.max_d_num = args.max_d_num

        # if os.path.isfile(self._path) and os.path.exists(self._path):
        #     self._load()
        if os.path.exists(self._root):
            self._preprocess(args)
        else:
            raise Exception('data file does not exist.')

    def _preprocess(self, args):
        """Build the qid-qid / uid-uid relation graphs and load the split.

        Reads the id dictionaries and the three per-split session files,
        derives four edge relations (same-session queries, click edges,
        consecutive documents, and meta-path-induced q-q / u-u edges plus
        self-loops), builds CSR adjacency ``MindHomoGraph`` objects for
        both node types, and populates ``self.data_set`` for ``self.mode``.

        Returns:
            dict: qid -> set of clicked uids (the click adjacency);
            the ``__init__`` caller discards this value.
        """
        query_qid = load_dict(args.output, 'query_qid.dict')
        url_uid = load_dict(args.output, 'url_uid.dict')

        # Calc edge information for train/valid/test set
        set_names = ['train', 'valid', 'test']
        # Edges are stored as str([a, b]) so the set deduplicates pairs;
        # they are eval'd back into lists further below.
        qid_edges, uid_edges = set(), set()
        qid_neighbors, uid_neighbors = {qid: set() for qid in range(len(query_qid))}, {uid: set() for uid in
                                                                                       range(len(url_uid))}
        for set_name in set_names:
            print('  - {}'.format('Constructing relations in {} set'.format(set_name)))
            # NOTE(review): file handle is never closed; a with-statement
            # would be safer.
            lines = open(os.path.join(args.output, '{}_per_query_quid.txt'.format(set_name))).readlines()

            # Relation 0: Query-Query within the same session
            cur_sid = -1
            qid_set = set()
            for line in lines:
                attr = line.strip().split('\t')
                sid = int(attr[0].strip())
                qid = int(attr[1].strip())
                if cur_sid == sid:
                    # query in the same session
                    qid_set.add(qid)
                else:
                    # session ends, start creating relations
                    # (edges are added in both directions)
                    qid_list = list(qid_set)
                    for i in range(1, len(qid_list)):
                        qid_edges.add(str([qid_list[i], qid_list[i - 1]]))
                        qid_edges.add(str([qid_list[i - 1], qid_list[i]]))
                    # new session starts
                    cur_sid = sid
                    qid_set.clear()
                    qid_set.add(qid)
            # The last session
            qid_list = list(qid_set)
            for i in range(1, len(qid_list)):
                qid_edges.add(str([qid_list[i], qid_list[i - 1]]))
                qid_edges.add(str([qid_list[i - 1], qid_list[i]]))

            # Relation 1 & 2: Document of is clicked in a Query
            for line in lines:
                attr = line.strip().split('\t')
                qid = int(attr[1].strip())
                uids = json.loads(attr[2].strip())
                clicks = json.loads(attr[4].strip())
                for uid, click in zip(uids, clicks):
                    if click:
                        # NOTE(review): set_names never contains 'demo', so
                        # only the 'train' split contributes click edges here.
                        if set_name == 'train' or set_name == 'demo':
                            qid_neighbors[qid].add(uid)  # doc ids clicked under this query
                            uid_neighbors[uid].add(qid)  # query ids under which this doc was clicked

            # Relation 3: successive Documents in the same query
            # (consecutive documents shown within one query)
            for line in lines:
                attr = line.strip().split('\t')
                uids = json.loads(attr[2].strip())
                for i in range(1, len(uids)):
                    uid_edges.add(str([uids[i], uids[i - 1]]))
                    uid_edges.add(str([uids[i - 1], uids[i]]))
        # At this point qid_edges holds same-session query-query relations;
        # the meta-path step below adds query-query pairs that clicked the
        # same document, and finally self-loops are appended.
        # uid_edges holds consecutive-document relations; the meta-path step
        # adds doc-doc pairs clicked under the same query, plus self-loops.
        # Meta-path to q-q & u-u
        # qid_neighbors[qid]: docs clicked under query qid
        # uid_neighbors[uid]: queries under which doc uid was clicked
        for qid in qid_neighbors:
            qid_neigh = list(qid_neighbors[qid])
            for i in range(len(qid_neigh)):
                for j in range(i + 1, len(qid_neigh)):
                    uid_edges.add(str([qid_neigh[i], qid_neigh[j]]))
                    uid_edges.add(str([qid_neigh[j], qid_neigh[i]]))
        for uid in uid_neighbors:
            uid_neigh = list(uid_neighbors[uid])
            for i in range(len(uid_neigh)):
                for j in range(i + 1, len(uid_neigh)):
                    qid_edges.add(str([uid_neigh[i], uid_neigh[j]]))
                    qid_edges.add(str([uid_neigh[j], uid_neigh[i]]))

        # Add self-loop
        for qid in range(len(query_qid)):
            qid_edges.add(str([qid, qid]))
        for uid in range(len(url_uid)):
            uid_edges.add(str([uid, uid]))

        # Convert & save edge information from set/list into tensor form.
        # NOTE(review): eval is applied to internally built "[a, b]" strings
        # only; ast.literal_eval would be the safer equivalent.
        qid_edges = [eval(edge) for edge in qid_edges]  # back to list-of-pairs
        uid_edges = [eval(edge) for edge in uid_edges]
        # Shape (2, num_edges): row 0 = sources, row 1 = destinations.
        qid_edge_index = Tensor.from_numpy(np.array(qid_edges, dtype=np.int64)).transpose((1, 0))
        uid_edge_index = Tensor.from_numpy(np.array(uid_edges, dtype=np.int64)).transpose((1, 0))

        # np.savez('qid_uid_edge.npz', qid_edges=qid_edge_index, uid_edges=uid_edge_index)
        # with np.load('qid_uid_edge.npz') as data:
        #     qid_edges_ = data['qid_edges']
        # Count degrees of qid/uid nodes.
        # Each node starts with itself so isolated nodes have degree 1.
        qid_degrees, uid_degrees = [set([i]) for i in range(len(query_qid))], [set([i]) for i in range(len(url_uid))]
        for qid_edge in qid_edges:
            qid_degrees[qid_edge[0]].add(qid_edge[1])
            qid_degrees[qid_edge[1]].add(qid_edge[0])
        for uid_edge in uid_edges:
            uid_degrees[uid_edge[0]].add(uid_edge[1])
            uid_degrees[uid_edge[1]].add(uid_edge[0])
        qid_degrees = [len(d_set) for d_set in qid_degrees]
        uid_degrees = [len(d_set) for d_set in uid_degrees]
        non_isolated_qid_cnt = sum([1 if qid_degree > 1 else 0 for qid_degree in qid_degrees])
        non_isolated_uid_cnt = sum([1 if uid_degree > 1 else 0 for uid_degree in uid_degrees])
        print('  - {}'.format(
            'Mean/Max/Min qid degree: {}, {}, {}'.format(sum(qid_degrees) / len(qid_degrees), max(qid_degrees),
                                                         min(qid_degrees))))
        print('  - {}'.format(
            'Mean/Max/Min uid degree: {}, {}, {}'.format(sum(uid_degrees) / len(uid_degrees), max(uid_degrees),
                                                         min(uid_degrees))))
        print('  - {}'.format('Non-isolated qid node num: {}'.format(non_isolated_qid_cnt)))
        print('  - {}'.format('Non-isolated uid node num: {}'.format(non_isolated_uid_cnt)))

        # Save direct uid-uid neighbors for neighbor feature interactions.
        # Each uid gets exactly max_node_degree neighbor ids: sampled without
        # replacement when it has enough neighbors, with replacement otherwise.
        uid_num = len(url_uid)
        max_node_degree = 64
        uid_neigh = [set([i]) for i in range(uid_num)]
        # NOTE(review): the embedding table is used as an int lookup buffer;
        # uid_neigh_sampler is local and discarded at return — verify intent.
        uid_neigh_sampler = nn.Embedding(uid_num, max_node_degree)
        for edge in uid_edges:
            src, dst = edge[0], edge[1]
            uid_neigh[src].add(dst)
            uid_neigh[dst].add(src)
        for idx, adj in enumerate(uid_neigh):
            adj_list = list(adj)
            if len(adj_list) >= max_node_degree:
                adj_sample = Tensor.from_numpy(np.array(random.sample(adj_list, max_node_degree), dtype=np.int64))
            else:
                adj_sample = Tensor.from_numpy(np.array(random.choices(adj_list, k=max_node_degree), dtype=np.int64))
            uid_neigh_sampler.embedding_table.data[idx] = adj_sample

        # Debug print of the embedding table's data type.
        print(type(uid_neigh_sampler.embedding_table.data))

        # Load the session records for the active split only.
        if self.mode == 'train':
            self.data_set = self.load_dataset(os.path.join(self._root, 'train_per_query_quid.txt'), mode=self.mode)
        if self.mode == 'valid':
            self.data_set = self.load_dataset(os.path.join(self._root, 'valid_per_query_quid.txt'), mode=self.mode)
        if self.mode == 'test':
            self.data_set = self.load_dataset(os.path.join(self._root, 'test_per_query_quid.txt'), mode=self.mode)

        qid_row = qid_edge_index[0]
        qid_col = qid_edge_index[1]
        uid_row = uid_edge_index[0]
        uid_col = uid_edge_index[1]

        # NOTE(review): the dictionaries are reloaded here from self._root;
        # presumably the same files as args.output above — verify.
        query_qid = load_dict(self._root, 'query_qid.dict')
        url_uid = load_dict(self._root, 'url_uid.dict')
        query_size = len(query_qid)
        doc_size = len(url_uid)

        # Build unweighted CSR adjacency matrices (all edge weights = 1).
        qid_data = np.ones(qid_row.shape)
        uid_data = np.ones(uid_row.shape)
        qid_node_count = query_size + 1
        uid_node_count = doc_size + 1
        qid_csr_mat = sp.csr_matrix((qid_data, (qid_row, qid_col)), shape=(qid_node_count, qid_node_count))
        uid_csr_mat = sp.csr_matrix((uid_data, (uid_row, uid_col)), shape=(uid_node_count, uid_node_count))

        # Wrap the query graph topology in a MindHomoGraph (identity node map).
        qid_generated_graph = MindHomoGraph()
        qid_node_dict = {idx: idx for idx in range(qid_node_count)}
        qid_edge_count = qid_col.shape[0]
        qid_edge_ids = np.array(list(range(qid_edge_count))).astype(np.int32)
        qid_generated_graph.set_topo(CsrAdj(qid_csr_mat.indptr.astype(np.int32), qid_csr_mat.indices.astype(np.int32)),
                                     qid_node_dict, qid_edge_ids)

        # Same for the document graph.
        uid_generated_graph = MindHomoGraph()
        uid_node_dict = {idx: idx for idx in range(uid_node_count)}
        uid_edge_count = uid_col.shape[0]
        uid_edge_ids = np.array(list(range(uid_edge_count))).astype(np.int32)
        uid_generated_graph.set_topo(CsrAdj(uid_csr_mat.indptr.astype(np.int32), uid_csr_mat.indices.astype(np.int32)),
                                     uid_node_dict, uid_edge_ids)
        return qid_neighbors

    def load_dataset(self, data_path, mode):
        """Load per-session records from a ``*_per_query_quid.txt`` file.

        Lines sharing a session id (column 0) are merged into one record
        with the session's qids, uids, vids, clicks (prefixed with a 0),
        the index of the last clicked position, and a 10-document relevance
        window starting at the first non -1 relevance.

        Args:
            data_path: path to the tab-separated per-query file.
            mode: split name. NOTE(review): relevances are read from the
                file only when mode == 'label', which never matches the
                'train'/'valid'/'test' values passed by ``_preprocess`` —
                otherwise they are zero-filled; confirm a 'label' caller
                exists.

        Returns:
            list[dict]: one dict per session.
        """
        data_set = []
        # NOTE(review): file handle is never closed.
        lines = open(data_path).readlines()  # read every line
        previous_sid = -1
        qids, uids, vids, clicks, relevances = [], [], [], [], []
        for line in lines:
            attr = line.strip().split('\t')  # strip the newline, split fields on tabs
            sid = int(attr[0].strip())
            if previous_sid != sid:  # a different session id appears
                # a new session starts
                if previous_sid != -1:
                    # flush the accumulated previous session; each query
                    # contributes exactly 10 documents
                    assert len(uids) // 10 == len(qids)
                    assert len(vids) // 10 == len(qids)
                    assert len(relevances) // 10 == len(qids)
                    assert (len(clicks) - 1) // 10 == len(qids)
                    last_rank = 0
                    for idx, click in enumerate(clicks[1:]):
                        last_rank = idx + 1 if click else last_rank
                    relevance_start = 0
                    for idx, relevance in enumerate(relevances):
                        if relevance != -1:
                            relevance_start = idx
                            assert relevance_start % 10 == 0
                            break
                    data_set.append({'sid': previous_sid,
                                     'qids': qids,
                                     'uids': uids,
                                     'vids': vids,
                                     'clicks': clicks,
                                     'last_rank': last_rank,
                                     'relevances': relevances[relevance_start: relevance_start + 10],
                                     'relevance_start': relevance_start})
                previous_sid = sid
                qids = [int(attr[1].strip())]
                uids = json.loads(attr[2].strip())
                vids = json.loads(attr[3].strip())
                clicks = [0] + json.loads(attr[4].strip())
                relevances = json.loads(attr[5].strip()) if mode == 'label' else [0 for _ in range(self.max_d_num)]
            else:
                # the previous session continues
                qids.append(int(attr[1].strip()))
                uids = uids + json.loads(attr[2].strip())
                vids = vids + json.loads(attr[3].strip())
                clicks = clicks + json.loads(attr[4].strip())
                relevances = relevances + (
                    json.loads(attr[5].strip()) if mode == 'label' else [0 for _ in range(self.max_d_num)])
        # Flush the final session (same logic as the in-loop flush above).
        last_rank = 0
        for idx, click in enumerate(clicks[1:]):
            last_rank = idx + 1 if click else last_rank
        relevance_start = 0
        for idx, relevance in enumerate(relevances):
            if relevance != -1:
                relevance_start = idx
                assert relevance_start % 10 == 0
                break
        data_set.append({'sid': previous_sid,
                         'qids': qids,
                         'uids': uids,
                         'vids': vids,
                         'clicks': clicks,
                         'last_rank': last_rank,
                         'relevances': relevances[relevance_start: relevance_start + 10],
                         'relevance_start': relevance_start})
        return data_set

    @property
    def node_feat_size(self):
        """
        Feature size of each node.

        NOTE(review): reads ``self.node_feat``, but this file only ever sets
        ``self._node_feat`` — presumably BaseDataSet exposes the property;
        verify before relying on this.

        Returns:
            - int, the number of feature size.

        Examples:
            >>> #dataset is an instance object of Dataset
            >>> node_feat_size = dataset.node_feat_size
        """
        return self.node_feat.shape[1]


if __name__ == '__main__':
    # CLI entry point: parse arguments, configure the MindSpore context and
    # build the dataset (TREC.__init__ runs _preprocess itself).
    parser = argparse.ArgumentParser(description='TREC2014')
    parser.add_argument("--device_id", type=int, default=5, help="which device id to use")
    parser.add_argument("--device", type=str, default="CPU", help="which device to use")
    parser.add_argument("--data_path", type=str, default='../data/TREC2014', help="path to dataset")
    parser.add_argument('--output', default='../data/TREC2014',
                        help='output path')
    parser.add_argument('--max_d_num', type=int, default=10,
                        help='max number of docs in a session')
    parser.add_argument('--mode', type=str, default="train", help="which mode to use")

    args = parser.parse_args()

    context.set_context(device_target=args.device, mode=context.GRAPH_MODE, enable_graph_kernel=True,
                        device_id=args.device_id)
    # Fix: the constructor already calls _preprocess(args); the previous
    # explicit dataset._preprocess(args) call re-ran the entire (expensive)
    # graph construction a second time through a private method.
    dataset = TREC(root=args.data_path, args=args)