import os
import cv2
import time
import random
import re
import string
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
import albumentations as A
from albumentations.pytorch import ToTensorV2
from augment import SafeRotate, CropWhite, PadWhite, SaltAndPepperNoise
from utils import FORMAT_INFO
from tokenizer import PAD_ID
from chemistry import normalize_nodes
cv2.setNumThreads(1)

# Probabilities for the Indigo-based on-the-fly rendering augmentations
# (applied when a dataset is built with dynamic_indigo=True).
# NOTE(review): "HYGROGEN" (hydrogen) and "DEARMOTIZE" (dearomatize) are typos,
# but the names are kept as-is because other modules may import them verbatim.
INDIGO_HYGROGEN_PROB = 0.2
INDIGO_FUNCTIONAL_GROUP_PROB = 0.8
INDIGO_CONDENSED_PROB = 0.5
INDIGO_RGROUP_PROB = 0.5
INDIGO_COMMENT_PROB = 0.3
INDIGO_DEARMOTIZE_PROB = 0.8
INDIGO_COLOR_PROB = 0.2

def get_transforms(input_size, augment=True, rotate=True, debug=False):
    """Build the albumentations preprocessing pipeline.

    Args:
        input_size: final square side length the image is resized to.
        augment: enable the stochastic training-time augmentations.
        rotate: also enable random rotation (only when ``augment`` is True).
        debug: skip grayscale conversion, normalization and tensor conversion
            so the output stays a viewable uint8 image.

    Returns:
        An ``A.Compose`` pipeline that also transforms ``xy`` keypoints and
        keeps keypoints that fall outside the image.
    """
    pipeline = []

    if augment and rotate:
        # Rotate on a white background so the molecule canvas stays white.
        pipeline.append(SafeRotate(limit=90, border_mode=cv2.BORDER_CONSTANT, value=(255, 255, 255)))

    # Trim surrounding whitespace, leaving a small margin.
    pipeline.append(CropWhite(pad=5))

    if augment:
        pipeline.extend([
            A.CropAndPad(percent=[-0.01, 0.00], keep_size=False, p=0.5),
            PadWhite(pad_ratio=0.4, p=0.2),
            A.Downscale(scale_min=0.2, scale_max=0.5, interpolation=3),
            A.Blur(),
            A.GaussNoise(),
            SaltAndPepperNoise(num_dots=20, p=0.5),
        ])

    pipeline.append(A.Resize(input_size, input_size))

    if not debug:
        # Standard ImageNet statistics, applied after forcing grayscale.
        pipeline.extend([
            A.ToGray(p=1),
            A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ToTensorV2(),
        ])

    return A.Compose(pipeline, keypoint_params=A.KeypointParams(format='xy', remove_invisible=False))




class TrainDataset(Dataset):
    """Dataset yielding ``(index, image, ref)`` triples for training.

    ``ref`` is a dict that, depending on ``args.formats`` and the dataframe
    columns, may contain tokenized SMILES (``'chartok_coords'``), atom token
    indices (``'atom_indices'``), normalized atom coordinates (``'coords'``)
    and a square bond matrix (``'edges'``).
    """

    def __init__(self, args, df, tokenizer=None, split='train', dynamic_indigo=False):
        super().__init__()
        self.df = df
        self.args = args
        self.tokenizer = tokenizer
        self.mask_ratio = args.mask_ratio

        if 'file_path' in df.columns:
            self.file_paths = df['file_path'].values
            # Prefix the data root unless paths are already rooted there.
            # (Guard against an empty dataframe before peeking at the first path.)
            if len(self.file_paths) > 0 and not self.file_paths[0].startswith(args.data_path):
                self.file_paths = [os.path.join(args.data_path, path) for path in df['file_path']]

        self.smiles = df['SMILES'].values if 'SMILES' in df.columns else None

        self.formats = args.formats
        self.dynamic_indigo = (dynamic_indigo and split == 'train')

        self.transform = get_transforms(args.input_size,
                                        augment=(args.augment))

        # NOTE(review): coords_df / pseudo_coords are only defined when no
        # coords file is given, yet getitem() reads pseudo_coords
        # unconditionally — confirm args.coords_file is always None here.
        if args.coords_file is None:
            self.coords_df = None
            self.pseudo_coords = args.pseudo_coords

    def __len__(self):
        return len(self.df)

    def image_transform(self, image, coords=None, renormalize=False):
        """Apply the transform pipeline; optionally carry keypoints through it.

        Args:
            coords: optional array-like of (x, y) pixel coordinates.
            renormalize: re-normalize coordinates via ``normalize_nodes``
                instead of dividing by the transformed image size.

        Returns:
            The transformed image, or ``(image, coords)`` with coordinates
            scaled into [0, 1] when ``coords`` is non-empty.
        """
        # Fix: the original used a mutable default argument (coords=[]).
        if coords is None:
            coords = []
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # .astype(np.float32)
        augmented = self.transform(image=image, keypoints=coords)
        image = augmented['image']
        if len(coords) > 0:
            coords = np.array(augmented['keypoints'])
            if renormalize:
                coords = normalize_nodes(coords, flip_y=False)
            else:
                _, height, width = image.shape
                coords[:, 0] = coords[:, 0] / width
                coords[:, 1] = coords[:, 1] / height
            coords = np.array(coords).clip(0, 1)
            return image, coords
        return image

    def __getitem__(self, idx):
        try:
            return self.getitem(idx)
        except Exception as e:
            # Persist the failure before re-raising so crashes inside
            # DataLoader workers leave a trace on disk.
            with open(os.path.join(self.args.save_path, f'error_dataset_{int(time.time())}.log'), 'w') as f:
                f.write(str(e))
            raise e

    def getitem(self, idx):
        """Load, augment and tokenize example ``idx``."""
        ref = {}

        file_path = self.file_paths[idx]
        image = cv2.imread(file_path)

        if image is None:
            # Fall back to a small blank image so one missing file does not
            # abort the whole epoch.
            image = np.array([[[255., 255., 255.]] * 10] * 10).astype(np.float32)
            print('=========================')
            print('not found!! not found!!!')
            print(file_path, 'not found!')
            print('=========================')

        if 'node_coords' in self.df.columns:
            h, w, _ = image.shape
            # NOTE(review): eval() on dataframe text — only safe for trusted
            # data files; consider ast.literal_eval.
            coords = np.array(eval(self.df.loc[idx, 'node_coords']))
            if self.pseudo_coords:
                coords = normalize_nodes(coords)

            # Scale normalized coordinates to pixel space before augmentation.
            coords[:, 0] = coords[:, 0] * w
            coords[:, 1] = coords[:, 1] * h
            image, coords = self.image_transform(image, coords, renormalize=self.pseudo_coords)
        else:
            # Fix: `raise NotImplemented` raises a TypeError in Python 3;
            # the constant is not an exception class.
            raise NotImplementedError("datasets without a 'node_coords' column are not supported")

        smiles = self.smiles[idx]
        if 'chartok_coords' in self.formats:
            if coords is not None:
                self._process_chartok_coords(idx, ref, smiles, coords, mask_ratio=self.mask_ratio)
            else:
                # Fix: same NotImplemented -> NotImplementedError correction.
                raise NotImplementedError("'chartok_coords' without coordinates is not supported")

        if self.args.predict_coords and ('atomtok_coords' in self.formats or 'chartok_coords' in self.formats):
            smiles = self.smiles[idx]
            if 'chartok_coords' in self.formats:
                # Re-tokenize without coordinates when they must be predicted.
                self._process_chartok_coords(idx, ref, smiles, mask_ratio=self.mask_ratio)

        return idx, image, ref

    def _process_chartok_coords(self, idx, ref, smiles, coords=None, edges=None, mask_ratio=0):
        """Tokenize ``smiles`` and fill ``ref`` with sequence/coords/edges tensors.

        Mutates ``ref`` in place; the edge matrix falls back to the dataframe's
        'edges' column, or to an all -100 (ignore-index) matrix.
        """
        max_len = FORMAT_INFO['chartok_coords']['max_len']
        tokenizer = self.tokenizer['chartok_coords']

        # Guard against NaN / missing SMILES entries coming from pandas.
        if smiles is None or type(smiles) is not str:
            smiles = ""

        label, indices = tokenizer.smiles_to_sequence(smiles, coords, mask_ratio=mask_ratio)
        ref['chartok_coords'] = torch.LongTensor(label[:max_len])
        # Keep only atom positions that survive the sequence truncation.
        indices = [i for i in indices if i < max_len]
        ref['atom_indices'] = torch.LongTensor(indices)

        if tokenizer.continuous_coords:
            if coords is not None:
                ref['coords'] = torch.tensor(coords)
            else:
                # -1 marks "coordinate unknown / to be predicted".
                ref['coords'] = torch.ones(len(indices), 2) * -1.

        if edges is not None:
            ref['edges'] = torch.tensor(edges)[:len(indices), :len(indices)]
        else:
            if 'edges' in self.df.columns:
                # NOTE(review): eval() on dataframe text — trusted data only.
                edge_list = eval(self.df.loc[idx, 'edges'])
                n = len(indices)
                edges = torch.zeros((n, n), dtype=torch.long)

                for u, v, t in edge_list:
                    if u < n and v < n:
                        if t <= 4:
                            # Symmetric bond types.
                            edges[u, v] = t
                            edges[v, u] = t
                        else:
                            # Directed bond types (presumably wedge/hash);
                            # 11 - t encodes the reverse direction — TODO confirm.
                            edges[u, v] = t
                            edges[v, u] = 11 - t

                ref['edges'] = edges
            else:
                # -100 is the conventional ignore index for the edge loss.
                ref['edges'] = torch.ones(len(indices), len(indices), dtype=torch.long) * (-100)


class AuxTrainDataset(Dataset):
    """Concatenation of a primary training dataset and an auxiliary dataset.

    Indices below ``len(train_dataset)`` address the primary dataset; the
    remainder address the auxiliary one. Only the primary dataset uses
    dynamic Indigo rendering (when enabled via ``args.dynamic_indigo``).
    """

    def __init__(self, args, train_df, aux_df, tokenizer):
        super().__init__()
        self.train_dataset = TrainDataset(args, train_df, tokenizer, dynamic_indigo=args.dynamic_indigo)
        self.aux_dataset = TrainDataset(args, aux_df, tokenizer, dynamic_indigo=False)

    def __len__(self):
        return len(self.train_dataset) + len(self.aux_dataset)

    def __getitem__(self, idx):
        n_train = len(self.train_dataset)
        if idx >= n_train:
            return self.aux_dataset[idx - n_train]
        return self.train_dataset[idx]


def pad_images(imgs):
    """Zero-pad image tensors to a shared (height, width) and stack them.

    Padding is appended on the bottom/right of the last two dimensions; all
    leading dimensions (e.g. channels) must already agree across ``imgs``.

    Returns:
        A single tensor of shape ``(len(imgs), ..., max_h, max_w)``.
    """
    max_h = max(img.shape[-2] for img in imgs)
    max_w = max(img.shape[-1] for img in imgs)
    # F.pad's 4-element spec is (left, right, top, bottom) over the last two dims.
    padded = [
        F.pad(img, [0, max_w - img.shape[-1], 0, max_h - img.shape[-2]], value=0)
        for img in imgs
    ]
    return torch.stack(padded)



def bms_collate(batch):
    """Collate ``(idx, image, ref)`` examples into padded batch tensors.

    Sequence formats become ``[padded_sequences, lengths]`` pairs; coordinates
    are padded with -1 and edge matrices with the ignore value -100.

    Returns:
        ``(ids, images, refs)`` where ``images`` is a stacked, padded tensor.
    """
    # Drop examples whose image failed to load.
    batch = [ex for ex in batch if ex[1] is not None]
    ids = [ex[0] for ex in batch]
    imgs = [ex[1] for ex in batch]

    formats = list(batch[0][2].keys())
    sequence_keys = ['atomtok', 'inchi', 'nodes', 'atomtok_coords', 'chartok_coords', 'atom_indices']
    seq_formats = [k for k in formats if k in sequence_keys]

    refs = {}
    for key in seq_formats:
        seqs = [ex[2][key] for ex in batch]
        lengths = [torch.LongTensor([len(s)]) for s in seqs]
        refs[key] = [
            pad_sequence(seqs, batch_first=True, padding_value=PAD_ID),
            torch.stack(lengths).reshape(-1, 1),
        ]

    if 'coords' in formats:
        refs['coords'] = pad_sequence([ex[2]['coords'] for ex in batch],
                                      batch_first=True, padding_value=-1.)

    if 'edges' in formats:
        edge_mats = [ex[2]['edges'] for ex in batch]
        max_len = max(len(e) for e in edge_mats)
        # Pad each square edge matrix to max_len x max_len on the right/bottom.
        refs['edges'] = torch.stack(
            [F.pad(e, (0, max_len - len(e), 0, max_len - len(e)), value=-100)
             for e in edge_mats],
            dim=0)

    return ids, pad_images(imgs), refs
