import os
import random
from contextlib import contextmanager
from typing import Union

import numpy as np
import torch
import torch.distributed as dist
from PIL import Image
from torch.backends import cudnn
from torchvision import transforms


class WaterMark(object):
    '''
    Lianjia-style watermark: tiles the logo over the image on a grid,
    jittering each tile's position randomly.

    The logo is loaded once in __init__, resized, and its alpha channel is
    scaled by `alpha` so the stamped watermark is semi-transparent.
    '''

    def __init__(self, alpha=0.25, source="./images/lianjia  LOGO2.png"):
        # `source` is now a parameter; the default preserves the original
        # hard-coded path, so existing callers are unaffected.
        logo = Image.open(source).convert("RGBA")
        # Empirical down-scaling factors kept from the original implementation.
        logo = logo.resize((int(logo.width // 15 * 1.5), int(logo.height // 30 * 2.5)))
        self.height, self.width = logo.height, logo.width

        arr = np.array(logo)
        # Scale the alpha channel to make the watermark translucent; cast back
        # to uint8 explicitly instead of relying on numpy's implicit setitem cast.
        arr[..., 3] = (arr[..., 3] * alpha).astype(np.uint8)
        self.source = Image.fromarray(arr)

    def __call__(self, image):
        """Return a crop-sized copy of `image` with the watermark tiled over it."""
        image = image.convert("RGBA")
        width = image.width
        height = image.height

        # Work on a 3x canvas with the image centered, so tiles whose random
        # jitter pushes them past an edge still land partially on the image.
        blank = Image.new("RGB", (3 * width, 3 * height), color=(0, 0, 0))
        blank.paste(image, (width, height))

        for i in range(3 * width // self.width):
            for j in range(3 * height // self.height):
                # Stamp only even (i, j) cells -> sparse checkerboard spacing.
                if i % 2 == 1 or j % 2 == 1:
                    continue
                blank.paste(self.source,
                            (i * self.width + random.randint(0, self.width // 2),
                             j * self.height + random.randint(0, self.height // 2)),
                            self.source)  # logo's own alpha used as paste mask

        # Crop back down to the original image area.
        output = blank.crop((width, height, 2 * width, 2 * height))
        del blank
        return output


class AddPepperNoise(object):
    '''
    Add salt-and-pepper noise to an image.
        snr: signal-to-noise ratio -- the fraction of pixels left untouched
        p: probability of applying the noise at all
    '''

    def __init__(self, snr, p=0.9):
        self.snr = snr
        self.p = p

    def __call__(self, img):
        # With probability (1 - p) the image passes through unchanged.
        if random.uniform(0, 1) >= self.p:
            return img

        pixels = np.array(img).copy()
        h, w, c = pixels.shape
        noise_pct = (1 - self.snr)  # fraction of pixels to corrupt
        # Per-pixel label: 0 = keep, 1 = salt (white), 2 = pepper (black).
        labels = np.random.choice((0, 1, 2), size=(h, w, 1),
                                  p=[self.snr, noise_pct * 0.8, noise_pct * 0.2])
        labels = np.repeat(labels, c, axis=2)
        pixels[labels == 1] = 255  # salt noise
        pixels[labels == 2] = 0    # pepper noise
        return Image.fromarray(pixels.astype('uint8')).convert('RGB')


class bcolors:
    """ANSI terminal escape sequences for colored / styled console output."""
    HEADER = '\x1b[95m'     # bright magenta
    OKBLUE = '\x1b[94m'     # bright blue
    OKGREEN = '\x1b[92m'    # bright green
    WARNING = '\x1b[93m'    # bright yellow
    FAIL = '\x1b[91m'       # bright red
    ENDC = '\x1b[0m'        # reset all attributes
    BOLD = '\x1b[1m'        # bold text
    UNDERLINE = '\x1b[4m'   # underlined text


def Color_print(line):
    """Print `line` to stdout wrapped in green ANSI codes (expects a str)."""
    colored = bcolors.OKGREEN + line + bcolors.ENDC
    print(colored)


def denormalize(im: Union[np.ndarray, torch.Tensor], mean=0.5, std=0.5):
    """Invert a `(x - mean) / std` normalization, i.e. return `im * std + mean`."""
    scaled = im * std
    return scaled + mean


def select_device(device='', batch_size=None):
    """Select the torch device to run on.

    device: '' (auto-detect), 'cpu', or a CUDA index list like '0' or '0,1,2,3'.
    batch_size: if given while multiple GPUs are visible, must be divisible by
        the GPU count so the batch splits evenly across devices.
    Returns torch.device('cuda:0') when CUDA is requested/available, else 'cpu'.
    Side effect: sets the CUDA_VISIBLE_DEVICES environment variable.
    """
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability

    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        # Check that batch_size is compatible with the number of visible GPUs.
        n = torch.cuda.device_count()
        if n > 1 and batch_size:
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        # NOTE(review): the original also looped over the devices querying
        # torch.cuda.get_device_properties() only to feed a logging string that
        # had already been commented out; that dead loop has been removed.

    return torch.device('cuda:0' if cuda else 'cpu')


def init_seeds(seed=0):
    """Seed every RNG in use: Python's, NumPy's, and torch's (via init_torch_seeds)."""
    for seeder in (random.seed, np.random.seed, init_torch_seeds):
        seeder(seed)


def init_torch_seeds(seed=0):
    """Seed torch and configure cuDNN.

    Speed/reproducibility tradeoff, see
    https://pytorch.org/docs/stable/notes/randomness.html:
    seed == 0 favors reproducibility (deterministic cuDNN, benchmark off);
    any other seed favors speed (benchmark on, non-deterministic).
    """
    torch.manual_seed(seed)
    reproducible = seed == 0
    cudnn.deterministic = reproducible
    cudnn.benchmark = not reproducible


@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Make all processes in distributed training wait for the local master
    (rank 0) to finish the wrapped work first; rank 0 then waits at the exit
    barrier to release them. No-op when local_rank == -1 (non-distributed).
    """
    is_master = local_rank in (-1, 0)
    if not is_master:
        dist.barrier()  # wait until rank 0 has done the work
    yield
    if local_rank == 0:
        dist.barrier()  # release the waiting ranks


def create_dataloader(world_size, dataset, batch_size, rank, workers, train=True):
    """Build a (possibly distributed) DataLoader over `dataset`.

    world_size: number of processes sharing this machine's CPUs (splits workers).
    rank: process rank; -1 means non-distributed (no DistributedSampler).
    train: retained for interface compatibility (see note below); currently unused.
    Returns (dataloader, dataset).
    """
    # NOTE(review): the original constructed torchvision train/test transforms
    # here but never applied them to the dataset -- per the original comment,
    # the dataset is expected to already carry its own transforms. That dead
    # transform-building code has been removed; `train` is kept so callers
    # are unaffected.

    # Barrier so that, under DDP, the local master touches/caches the dataset first.
    with torch_distributed_zero_first(rank):
        data = dataset

    # Never ask for batches larger than the dataset itself.
    batch_size = min(batch_size, len(data))
    # Cap workers by: CPUs available per process, the batch size
    # (0 workers for a batch of 1), and the user-requested count.
    nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers])
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None

    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, num_workers=nw, sampler=sampler, pin_memory=True
    )

    return dataloader, dataset


def reduce_mean(tensor, nprocs):
    """Sum `tensor` across all processes and return the per-process mean.

    Clones first so the caller's tensor is left untouched; requires an
    initialized torch.distributed process group.
    """
    reduced = tensor.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    reduced /= nprocs  # in-place, preserving the clone's dtype semantics
    return reduced

# def init_seeds(seed=0, cuda_deterministic=True):
#     random.seed(seed)
#     np.random.seed(seed)
#     torch.manual_seed(seed)
#     # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
#     if cuda_deterministic:  # slower, more reproducible
#         cudnn.deterministic = True
#         cudnn.benchmark = False
#     else:  # faster, less reproducible
#         cudnn.deterministic = False
#         cudnn.benchmark = True
