import gc
import logging
import math
import os.path as osp
import pickle
import random
import threading
from concurrent.futures import ThreadPoolExecutor
from typing import Final

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spalg
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tqdm import trange, tqdm

import LPSI as LPSI
import file_cache
import lzyutil
from config import args
from data_loader import GraphDataset
import timing
import torch

# Upper bound on the number of elements allowed in a temporary dense matrix.
# Used to derive a per-chunk partition size (DENSE_MAT_SIZE / numnodes) so
# that chunked frontier expansion stays within memory.
DENSE_MAT_SIZE = 10_000_000_000

# Node-state encoding shared by the SI / SIR / IC simulations below.
INFECTED: Final = 1
UNINFECTED: Final = 0
RECOVERED: Final = 2


class PropagationModel(Dataset):
    """Base class for graph-propagation label datasets (SI / SIR / IC).

    A subclass simulates propagation in its ``__init__``, filling
    ``self._data_group`` with one dict per sample
    (``{'id', 'gt', 'data'}``), and then calls
    :meth:`generate_and_store_lpsi_data`, which derives LPSI feature
    matrices and registers everything with the file cache. Once cached,
    subsequent runs load samples lazily via :meth:`__getitem__`.
    """

    def __init__(self, dataset: "GraphDataset", batch_size: int, seq_len_ub: int, src_num: int, iter_num: int,
                 infect_rate: float, recover_rate=.0,
                 lb=.0, ub=0.7, type_str=None, update_cache=False):
        """
        :param dataset: graph on which propagation is simulated
        :param batch_size: number of independent simulations (dataset length)
        :param seq_len_ub: number of snapshots kept per simulation
        :param src_num: number of infection sources per simulation
        :param iter_num: maximum number of simulation steps
        :param infect_rate: per-node infection probability
        :param recover_rate: per-node recovery probability (SIR only; forced
            to 0 for every other model type)
        :param lb: lower bound on the infected ratio of kept snapshots
        :param ub: upper bound on the infected ratio (stops the simulation)
        :param type_str: propagation model tag ("SI", "SIR", "IC")
        :param update_cache: when True, regenerate and overwrite cached data
        """
        super().__init__()
        self._data_group: list[dict] = []
        self._infect_rate_list = None
        self._recover_rate_list = None
        self._infect_indeg_list = None
        self._batch_size = batch_size
        self._seq_len_ub = seq_len_ub
        self._dataset = dataset  # GraphDataset
        self._numnodes = dataset.get_size()
        # Chunk size chosen so one dense (part_size x numnodes) slice stays
        # below DENSE_MAT_SIZE elements.
        self._part_size = math.ceil(DENSE_MAT_SIZE / self._numnodes)
        self._src_num = src_num
        self._gen_steps = iter_num
        self._infect_rate = infect_rate
        # BUG FIX: the original used ``type_str is not "SIR"`` — identity
        # comparison against a string literal is unreliable (and a
        # SyntaxWarning since Python 3.8). Equality is the intended check.
        if type_str != "SIR":
            recover_rate = 0
        self._recover_rate = recover_rate
        # Window bounds of the kept snapshot sequence; -1 means "not set".
        self._lbound_I = -1
        self._rbound_I = -1
        self._update_cache = update_cache
        self._cache_found = False

        # Cache key encodes every parameter that affects the generated data.
        self.cache_path_prefix = (f'{dataset.dataset_name}'
                                  f'-{src_num}'
                                  f'-{infect_rate:.4f}'
                                  f'-{recover_rate:.4f}'
                                  f'-{(lb, ub)}'
                                  f'-{batch_size}X{seq_len_ub}'
                                  f'-{type_str}labels')

        self.lpsi_cache_path = f'lpsi_{self.cache_path_prefix}'
        self.lpsi_inv_mat_path = f'lpsi_inv_mat_{dataset.dataset_name}'

        meta_path = file_cache.to_cache_path(self.meta_path())
        if type_str is not None and osp.exists(meta_path) and not update_cache:
            # Cached metadata exists and regeneration was not requested.
            self._cache_found = True
            with open(meta_path, 'rb') as fin:
                self._infect_rate_list, self._recover_rate_list, self._infect_indeg_list = \
                    pickle.load(fin)
        else:
            # TODO: support distinct per-node infection/recovery rates.
            self._infect_rate_list = np.full(
                self._numnodes, infect_rate, dtype=np.float32)
            self._recover_rate_list = np.full(
                self._numnodes, recover_rate, dtype=np.float32)
            # Column sums of the adjacency matrix = node in-degrees.
            self._indeg_list = np.asarray(
                self._dataset.get_adjacency_coo().sum(axis=0)).flatten()
            logging.debug('infect rate list: %s', self._infect_rate_list)
            assert self._indeg_list.shape == self._infect_rate_list.shape
            self._infect_indeg_list = self._indeg_list * self._infect_rate_list

        self._data_group = [None] * self._batch_size

    def __len__(self):
        """Number of samples (independent simulations)."""
        return self._batch_size

    def __getitem__(self, item) -> dict:
        """Load one cached sample dict; returns ``None`` on a cache miss so
        the caller decides how to handle missing data."""
        assert item < self._batch_size

        ok, data = file_cache.load(self.data_path(item))
        if not ok:
            return None
        return data

    def get_meta(self, item: list[int]) -> torch.Tensor:
        """Per-node meta features, tiled for a batch.

        Stacks (infect-rate-weighted in-degree, infect rate, recover rate)
        into a (numnodes, 3) tensor and repeats it to shape
        (seq_len_ub, len(item), numnodes, 3).
        """
        ans = torch.stack(
            (
                torch.tensor(self._infect_indeg_list),
                torch.tensor(self._infect_rate_list),
                torch.tensor(self._recover_rate_list)
            ), dim=-1
        )
        return ans.repeat(self._seq_len_ub, len(item), 1, 1)

    def get_data(self):
        """Raw in-memory data group (only populated before caching)."""
        return self._data_group

    def data_path(self, part_id):
        """Cache key for sample ``part_id``."""
        return self.cache_path_prefix + f"-part{part_id}"

    def meta_path(self):
        """Cache key for the (rates, weighted in-degree) metadata tuple."""
        return self.cache_path_prefix + "-meta"

    def generate_and_store_lpsi_data(self):
        """Compute LPSI features for every sample and register all caches.

        For each sample, builds sparse +1/-1 seed matrices from the
        infected / uninfected node sets per snapshot, runs the iterative
        LPSI process for both polarities, and registers the resulting
        arrays plus labels with the file cache. Frees ``_data_group``
        afterwards since everything lives in the cache.
        """
        coo_adj = self._dataset.get_adjacency_coo()
        nl_coo_adj = self._dataset.get_norm_laplacian_coo()

        def gen_lpsi_routine(idx):
            data_item = self._data_group[idx]
            infected_list = data_item['data']
            seq_len = len(infected_list)

            # LPSI+ seeds: infected nodes per snapshot.
            positive_row_ids = []
            positive_col_ids = []

            # LPSI- seeds: uninfected nodes per snapshot.
            negative_row_ids = []
            negative_col_ids = []

            # Ground-truth labels; -1 marks "unlabeled" entries.
            y_label = np.full(
                (self._seq_len_ub, self._numnodes), -1, dtype=np.int8)

            for seq_id, infected in enumerate(infected_list):
                positive_row_ids += [seq_id] * len(infected)
                positive_col_ids += infected.tolist()
                uninfected_nodes = np.setdiff1d(
                    np.arange(self._numnodes), infected)
                negative_row_ids += [seq_id]*len(uninfected_nodes)
                negative_col_ids += uninfected_nodes.tolist()
                y_label[seq_id, infected] = INFECTED

            lpsi_positive_coo = sp.coo_matrix((
                [1] * len(positive_row_ids),
                (positive_row_ids, positive_col_ids)
            ),
                shape=(seq_len, self._numnodes))

            lpsi_negative_coo = sp.coo_matrix((
                [-1] * len(negative_row_ids),
                (negative_row_ids, negative_col_ids)
            ),
                shape=(seq_len, self._numnodes))

            positive_lpsi_data = LPSI.LPSI_batch_iter_process(nl_coo_adj, args.lpsi_alpha, lpsi_positive_coo,
                                                              normalized=True,
                                                              iter_step=args.lpsi_iter_step).toarray()
            negative_lpsi_data = LPSI.LPSI_batch_iter_process(nl_coo_adj, args.lpsi_alpha, lpsi_negative_coo,
                                                              normalized=True,
                                                              iter_step=args.lpsi_iter_step).toarray()
            return idx, y_label, positive_lpsi_data, negative_lpsi_data

        with ThreadPoolExecutor() as executor:
            for rst in tqdm(executor.map(gen_lpsi_routine, range(self._batch_size)),
                            desc='Generating lpsi data',
                            total=self._batch_size):
                idx, y_label, positive_lpsi_data, negative_lpsi_data = rst
                file_cache.register(self.data_path(idx), {
                    'id': self._data_group[idx]['id'],
                    'gt': self._data_group[idx]['gt'],
                    'y':  y_label,
                    'lpsi_positive': positive_lpsi_data,
                    'lpsi_negative': negative_lpsi_data
                }, self._update_cache)
        file_cache.register(self.meta_path(),
                            (self._infect_rate_list, self._recover_rate_list,
                             self._infect_indeg_list),
                            self._update_cache)

        # Everything is in the file cache now; release the in-memory copy.
        del self._data_group


use_concurrent = False


class SIDataSet(PropagationModel):
    """SI (Susceptible-Infected) propagation dataset.

    For each of ``batch_size`` samples: pick ``src_num`` random sources,
    simulate SI spreading (an infected node infects each susceptible
    neighbor independently with its infection rate) until the infected
    ratio reaches ``ub`` or ``iter_num`` steps elapse, then keep a window
    of ``seq_len_ub`` snapshots whose infected ratio starts at ``lb``.
    """

    def __init__(self, dataset: "GraphDataset",
                 batch_size=15, seq_len_ub=30,
                 iter_num=1000, src_num=5, infect_rate=0.1, lb=0.1, ub=0.7, update_cache=False):
        super().__init__(dataset, batch_size, seq_len_ub, src_num, iter_num, infect_rate,
                         lb=lb, ub=ub,
                         type_str="SI",
                         update_cache=update_cache)

        if self._cache_found:
            return

        for step in range(self._batch_size):
            # BUG FIX: the window bounds were only initialized once in the
            # base-class __init__, so every batch after the first reused
            # stale values and the ``== -1`` guards below never re-fired.
            # Reset them for each independent simulation.
            self._lbound_I = -1
            self._rbound_I = -1

            # Pick the infection sources for this sample.
            batch_src_list = random.sample(
                range(self._numnodes), self._src_num)

            # labels[t] will hold the node ids infected at step t.
            labels: list[np.ndarray] = [None] * self._gen_steps

            # Per-node state: 0 for S, 1 for I.
            state = np.zeros(self._numnodes, dtype=np.int8)
            state[batch_src_list] = INFECTED

            max_ts = self._dataset.max_ts
            min_ts = math.ceil(args.dgraph_start * max_ts)

            if self._dataset.is_dynamic:
                # Spread the simulation steps across the dynamic-graph
                # timestamp range.
                partition = lzyutil.Partition(
                    math.floor((max_ts - min_ts) * args.lp_duration), part=self._gen_steps)
                cur_ts_list = partition.get_end_idx()
            else:
                cur_ts_list = [0] * self._gen_steps

            for seq_idx in tqdm(list(range(self._gen_steps)), desc=f'Generating labels for batch {step}',
                                leave=True):
                self._dataset.cur_ts = cur_ts_list[seq_idx]
                csr_graph = self._dataset.get_adjacency_csr()

                cur_infect_src_list = np.where(state == INFECTED)[0]
                # Expand the frontier in chunks to avoid running out of
                # memory on large graphs.
                partition = lzyutil.Partition(
                    len(cur_infect_src_list), part_size=self._part_size)
                src2adj = dict()
                for startI, endE in partition.get_range_ie():
                    src_node, adj_node = csr_graph[cur_infect_src_list[startI:endE]].nonzero(
                    )
                    # Only susceptible neighbors can be infected.
                    uninfected_indices = state[adj_node] == UNINFECTED
                    src_node = src_node[uninfected_indices]
                    adj_node = adj_node[uninfected_indices]
                    for src_idx, adj in zip(src_node, adj_node):
                        src = cur_infect_src_list[src_idx + startI]
                        if src not in src2adj:
                            src2adj[src] = []
                        src2adj[src].append(adj)

                for src_node, adj_nodes in src2adj.items():
                    if not adj_nodes:
                        continue
                    adj_np = np.array(adj_nodes, dtype=np.uint64)
                    # Each exposed neighbor is infected independently with
                    # its own infection rate.
                    infect_result = np.where(
                        np.random.uniform(
                            0, 1, len(adj_nodes)) < self._infect_rate_list[adj_np],
                        True, False)
                    assert infect_result.shape == adj_np.shape
                    state[adj_np[infect_result]] = INFECTED

                labels[seq_idx] = np.where(state == INFECTED)[0]

                line = labels[seq_idx]
                active_len = len(line)

                if active_len / self._numnodes >= ub:
                    assert seq_idx > 0
                    self._rbound_I = seq_idx - 1
                    break
                if active_len / self._numnodes >= lb and self._lbound_I == -1:
                    self._lbound_I = seq_idx

                print(f'seq_idx:{seq_idx + 1}/{self._gen_steps}, '
                      f'infected cnt : {active_len}, '
                      f'infected_ratio:{(active_len / self._numnodes):.4f}')

                if self._dataset.is_dynamic:
                    pass
                    # logging.info(f"cur graph edge cnt:{csr_graph.nnz}")
            assert self._lbound_I != -1
            if self._rbound_I == -1:
                self._rbound_I = self._gen_steps - 1

            # Trim any excess so the kept window fits into seq_len_ub.
            while self._rbound_I - self._lbound_I >= self._seq_len_ub - 1:
                self._rbound_I -= 1

            # Shift the kept window to the front of ``labels``.
            for i in range(self._lbound_I, self._rbound_I + 1):
                labels[i - self._lbound_I] = labels[i]

            # Pad the tail by repeating the last snapshot.
            i = self._rbound_I - self._lbound_I
            while i < self._seq_len_ub - 1:
                labels[i + 1] = labels[i]
                i += 1

            active_ratio_list = [
                len(labels[i]) / self._numnodes for i in range(self._seq_len_ub)
            ]
            logging.info(f'active_ratio_list: {active_ratio_list}')

            self._data_group[step] = {'id': step, 'gt': np.array(
                batch_src_list), 'data': labels[:self._seq_len_ub]}

        self.generate_and_store_lpsi_data()


class SIRDataSet(PropagationModel):
    """SIR (Susceptible-Infected-Recovered) propagation dataset.

    Same windowing scheme as the SI dataset, but after each infection
    round the previous round's spreaders recover independently with
    their recovery rate; recovered nodes neither spread nor get
    re-infected, and only currently-infected nodes appear in the labels.
    """

    def __init__(self, dataset: "GraphDataset", batch_size, seq_len_ub, iter_num=1000, src_num=5,
                 infect_rate=0.3, recover_rate=0, lb=0.1, ub=0.7,
                 update_cache=False):
        super().__init__(dataset, batch_size, seq_len_ub, src_num, iter_num, infect_rate,
                         recover_rate=recover_rate,
                         lb=lb, ub=ub,
                         type_str="SIR",
                         update_cache=update_cache)

        if self._cache_found:
            return

        for step in range(self._batch_size):
            # BUG FIX: reset the window bounds for each independent
            # simulation — the original carried them over from the
            # previous batch, so the ``== -1`` guards below never re-fired.
            self._lbound_I = -1
            self._rbound_I = -1

            # Pick the infection sources for this sample.
            batch_src_list = random.sample(
                range(self._numnodes), self._src_num)

            # labels[t] will hold the node ids infected at step t.
            labels: list[np.ndarray] = [None] * self._gen_steps

            # Per-node state: 0 for S, 1 for I, 2 for R.
            state = np.zeros(self._numnodes, dtype=np.int8)
            state[batch_src_list] = INFECTED

            max_ts = self._dataset.max_ts
            min_ts = math.ceil(args.dgraph_start * max_ts)
            if self._dataset.is_dynamic:
                # Spread the simulation steps across the dynamic-graph
                # timestamp range.
                partition = lzyutil.Partition(
                    math.floor((max_ts - min_ts) * args.lp_duration), part=self._gen_steps)
                cur_ts_list = partition.get_end_idx()
            else:
                cur_ts_list = [0] * self._gen_steps

            for seq_idx in tqdm(list(range(self._gen_steps)), desc=f'Generating labels for batch {step}',
                                leave=True):
                self._dataset.cur_ts = cur_ts_list[seq_idx]
                csr_graph = self._dataset.get_adjacency_csr()

                cur_infect_src_list = np.where(state == INFECTED)[0]

                # Expand the frontier in chunks to avoid running out of
                # memory on large graphs.
                partition = lzyutil.Partition(
                    len(cur_infect_src_list), part_size=self._part_size)
                src2adj = dict()
                for startI, endE in partition.get_range_ie():
                    src_node, adj_node = csr_graph[cur_infect_src_list[startI:endE]].nonzero(
                    )
                    # Only susceptible neighbors can be infected (recovered
                    # nodes are filtered out here as well).
                    uninfected_indices = state[adj_node] == UNINFECTED
                    src_node = src_node[uninfected_indices]
                    adj_node = adj_node[uninfected_indices]
                    for src_idx, adj in zip(src_node, adj_node):
                        src = cur_infect_src_list[src_idx + startI]
                        if src not in src2adj:
                            src2adj[src] = []
                        src2adj[src].append(adj)

                for src_node, adj_nodes in src2adj.items():
                    if not adj_nodes:
                        continue
                    adj_np = np.array(adj_nodes, dtype=np.uint64)
                    # Each exposed neighbor is infected independently with
                    # its own infection rate (non-spreading candidates were
                    # already removed via uninfected_indices).
                    infect_result = np.where(
                        np.random.uniform(
                            0, 1, len(adj_nodes)) < self._infect_rate_list[adj_np],
                        True, False)
                    assert infect_result.shape == adj_np.shape
                    state[adj_np[infect_result]] = INFECTED

                # Decide which of the previous round's spreaders recover
                # this round.
                recover_result = np.where(
                    np.random.uniform(
                        0, 1, len(cur_infect_src_list)) < self._recover_rate_list[cur_infect_src_list],
                    True, False)

                state[cur_infect_src_list[recover_result]] = RECOVERED
                labels[seq_idx] = np.where(
                    state == INFECTED)[0]

                line = labels[seq_idx]
                active_len = len(line)
                if active_len / self._numnodes >= ub:
                    assert seq_idx > 0
                    self._rbound_I = seq_idx - 1
                    break
                if active_len / self._numnodes >= lb and self._lbound_I == -1:
                    self._lbound_I = seq_idx

                print(f'seq_idx:{seq_idx + 1}/{self._gen_steps}, '
                      f'infected cnt : {active_len}, '
                      f'infected_ratio:{(active_len / self._numnodes):.4f}')

                if self._dataset.is_dynamic:
                    pass
                    # logging.info(f"cur graph edge cnt:{csr_graph.nnz}")

            assert self._lbound_I != -1
            if self._rbound_I == -1:
                self._rbound_I = self._gen_steps - 1

            # Trim any excess so the kept window fits into seq_len_ub.
            while self._rbound_I - self._lbound_I >= self._seq_len_ub - 1:
                self._rbound_I -= 1

            # Shift the kept window to the front of ``labels``.
            for i in range(self._lbound_I, self._rbound_I + 1):
                labels[i - self._lbound_I] = labels[i]

            # Pad the tail by repeating the last snapshot.
            i = self._rbound_I - self._lbound_I
            while i < self._seq_len_ub - 1:
                labels[i + 1] = labels[i]
                i += 1

            active_ratio_list = [
                len(labels[i]) / self._numnodes for i in range(self._seq_len_ub)
            ]
            logging.info(f'active_ratio_list: {active_ratio_list}')

            self._data_group[step] = {'id': step, 'gt': np.array(
                batch_src_list), 'data': labels[:self._seq_len_ub]}

        self.generate_and_store_lpsi_data()


class ICDataSet(PropagationModel):
    """IC (Independent Cascade) propagation dataset.

    Unlike SI, each node gets exactly one chance to activate its
    neighbors: only the nodes newly infected in the previous round form
    the active frontier for the next round. The simulation stops when
    the infected ratio reaches ``ub`` or the frontier becomes empty.
    """

    def __init__(self, dataset: "GraphDataset", batch_size, seq_len_ub,
                 iter_num=1000, src_num=5, infect_rate=0.1, lb=0.1, ub=0.7, update_cache=False):
        super().__init__(dataset, batch_size, seq_len_ub, src_num, iter_num, infect_rate,
                         lb=lb, ub=ub,
                         type_str="IC",
                         update_cache=update_cache)

        if self._cache_found:
            return

        for step in range(self._batch_size):
            # BUG FIX: reset the window bounds for each independent
            # simulation — the original carried them over from the
            # previous batch, so the ``== -1`` guards below never re-fired.
            self._lbound_I = -1
            self._rbound_I = -1

            # Pick the infection sources for this sample.
            batch_src_list = random.sample(
                range(self._numnodes), self._src_num)
            # labels[t] will hold the node ids infected at step t.
            labels: list[np.ndarray] = [None] * self._gen_steps

            # Per-node state: 0 for S, 1 for I.
            state = np.zeros(self._numnodes, dtype=np.int8)
            state[batch_src_list] = INFECTED

            max_ts = self._dataset.max_ts
            min_ts = math.ceil(args.dgraph_start * max_ts)

            if self._dataset.is_dynamic:
                # Spread the simulation steps across the dynamic-graph
                # timestamp range.
                partition = lzyutil.Partition(
                    math.floor((max_ts - min_ts) * args.lp_duration), part=self._gen_steps)
                cur_ts_list = partition.get_end_idx()
            else:
                cur_ts_list = [0] * self._gen_steps

            # Active frontier: nodes that may still activate neighbors.
            cur_infection_set = set(batch_src_list)
            for seq_idx in tqdm(list(range(self._gen_steps)), desc=f'Generating labels for batch {step}',
                                leave=True):
                self._dataset.cur_ts = cur_ts_list[seq_idx]
                csr_graph = self._dataset.get_adjacency_csr()
                new_infection_set = set()

                cur_infect_src_list = np.array(list(cur_infection_set))
                # Expand the frontier in chunks to avoid running out of
                # memory on large graphs.
                partition = lzyutil.Partition(
                    len(cur_infect_src_list), part_size=self._part_size)
                src2adj = dict()
                for startI, endE in partition.get_range_ie():
                    src_node, adj_node = csr_graph[cur_infect_src_list[startI:endE]].nonzero(
                    )
                    # Only susceptible neighbors can be activated.
                    uninfected_indices = state[adj_node] == UNINFECTED
                    src_node = src_node[uninfected_indices]
                    adj_node = adj_node[uninfected_indices]
                    for src_idx, adj in zip(src_node, adj_node):
                        src = cur_infect_src_list[src_idx + startI]
                        if src not in src2adj:
                            src2adj[src] = []
                        src2adj[src].append(adj)

                for src_node, adj_nodes in src2adj.items():
                    if not adj_nodes:
                        continue
                    adj_np = np.array(adj_nodes, dtype=np.uint64)
                    # Each exposed neighbor is activated independently with
                    # its own infection rate.
                    infect_result = np.where(
                        np.random.uniform(
                            0, 1, len(adj_nodes)) < self._infect_rate_list[adj_np],
                        True, False)
                    assert infect_result.shape == adj_np.shape
                    new_infected = adj_np[infect_result]
                    state[new_infected] = INFECTED
                    new_infection_set.update(new_infected)

                labels[seq_idx] = np.where(
                    state == INFECTED)[0]
                # IC semantics: only the newly infected nodes form the next
                # round's frontier.
                cur_infection_set = new_infection_set

                line = labels[seq_idx]
                active_len = len(line)
                # Stop when the ratio exceeds the bound or the cascade dies.
                if active_len / self._numnodes >= ub or len(cur_infection_set) == 0:
                    assert seq_idx > 0
                    self._rbound_I = seq_idx - 1
                    break
                if active_len / self._numnodes >= lb and self._lbound_I == -1:
                    self._lbound_I = seq_idx

                print(f'seq_idx:{seq_idx + 1}/{self._gen_steps}, '
                      f'infected cnt : {active_len}, '
                      f'infected_ratio:{(active_len / self._numnodes):.4f}')

                if self._dataset.is_dynamic:
                    pass
                    # logging.info(f"cur graph edge cnt:{csr_graph.nnz}")

            assert self._lbound_I != -1
            if self._rbound_I == -1:
                self._rbound_I = self._gen_steps - 1

            # Trim any excess so the kept window fits into seq_len_ub.
            while self._rbound_I - self._lbound_I >= self._seq_len_ub - 1:
                self._rbound_I -= 1

            # Shift the kept window to the front of ``labels``.
            for i in range(self._lbound_I, self._rbound_I + 1):
                labels[i - self._lbound_I] = labels[i]

            # Pad the tail by repeating the last snapshot.
            i = self._rbound_I - self._lbound_I
            while i < self._seq_len_ub - 1:
                labels[i + 1] = labels[i]
                i += 1

            active_ratio_list = [
                len(labels[i]) / self._numnodes for i in range(self._seq_len_ub)
            ]
            logging.info(f'active_ratio_list: {active_ratio_list}')
            self._data_group[step] = {'id': step, 'gt': np.array(
                batch_src_list), 'data': labels[:self._seq_len_ub]}

        self.generate_and_store_lpsi_data()
        # BUG FIX: the original ended with ``self.register_file_cache()``,
        # a method defined neither on this class nor on PropagationModel —
        # it would always raise AttributeError. Caching is already handled
        # inside generate_and_store_lpsi_data(), so the call is removed.


if __name__ == '__main__':
    dataset = "jazz"
    lb, ub = 0.01, 0.8
    seq_len_ub = 60
    data_group_size = 4

    # Build (or load from cache) an SI-model label dataset.
    # BUG FIX: the original ignored the ``dataset`` and ``data_group_size``
    # variables defined above and hard-coded "jazz" / 4 further down; the
    # variables are used now (same values, so runtime behavior is unchanged).
    label_dataset = SIDataSet(GraphDataset(dataset),
                              batch_size=10,
                              seq_len_ub=seq_len_ub,
                              iter_num=args.generate_step,
                              src_num=5,
                              infect_rate=args.infect_rate,
                              lb=lb, ub=ub,
                              update_cache=args.update_cache)
    # Alternative generators, kept for reference:
    # label_dataset = SIRDataSet(GraphDataset(dataset),
    #                            batch_size=10,
    #                            seq_len_ub=seq_len_ub,
    #                            iter_num=args.generate_step,
    #                            src_num=args.num_src,
    #                            infect_rate=args.infect_rate,
    #                            recover_rate=args.recover_rate,
    #                            lb=lb, ub=ub,
    #                            update_cache=args.update_cache)

    # label_dataset = ICDataSet(GraphDataset(dataset),
    #                           batch_size=10,
    #                           seq_len_ub=seq_len_ub,
    #                           iter_num=args.generate_step,
    #                           src_num=args.num_src,
    #                           infect_rate=args.infect_rate * 1.5,
    #                           lb=lb, ub=ub,
    #                           update_cache=args.update_cache)

    dataloader = DataLoader(dataset=label_dataset,
                            batch_size=data_group_size,
                            shuffle=True)
    # The DataLoader collates each sample dict, adding a leading batch dim.
    # BUG FIX: the inner loop variable used to shadow the outer ``v``;
    # distinct names keep the outer loop value intact.
    for sample in dataloader:
        print(sample['id'], sample['gt'].shape, sample['data'].shape)
        for idx, batch in enumerate(sample['data']):
            print(f"----- batch {idx}:")
            for i, row in enumerate(batch):
                print(i, row[:20])