import os
import os.path as osp
import re
import time
import timeit

import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision.utils import save_image
# from torchvision import transforms

from metrics import SegMetric
from networks import get_model
from utils import *
from utils import generate_label_plain

def get_file_suffix(file_path):
  """Return the extension of *file_path* without the leading dot.

  Args:
    file_path: A path or bare file name.

  Returns:
    The text after the last ".", or "" when the name contains no dot
    (the old implementation returned the whole string in that case).
  """
  dot = file_path.rfind(".")
  # rfind returns -1 when no dot exists; slicing from 0 would wrongly
  # hand back the entire path, so treat that case explicitly.
  return "" if dot == -1 else file_path[dot + 1:]


def get_file_name(file_path):
  """Return the final path component of *file_path*.

  Splits on both "/" and "\\" so Windows-style paths are handled even
  on POSIX, where os.path.basename would not split on backslashes.
  Another implementation (POSIX-only): os.path.split(file_path)[-1]

  Args:
    file_path: Path string.

  Returns:
    The file name after the last separator.
  """
  # Raw string: the pattern is a literal backslash OR a forward slash.
  return re.split(r"\\|/", file_path)[-1]

def get_file_paths(dir_name: str, file_suffix=None) -> list:
  """Recursively collect file paths under *dir_name*.

  Args:
    dir_name(str): Root directory to walk.
    file_suffix(list): Optional extensions to keep, with or without the
      leading dot (e.g. ["png", ".jpg"]). Keeps every file when
      empty/None.

  Returns:
    file_paths(list): Paths joined from the walk, in os.walk order.
  """
  # file_suffix defaults to None instead of a mutable [] (shared-default
  # pitfall); `or []` keeps passing an empty list backward compatible.
  wanted = [s.replace(".", "") for s in (file_suffix or [])]
  file_paths = []
  for root, _, files in os.walk(dir_name):
    for name in files:
      # splitext yields e.g. ".png" -> strip the dot for comparison.
      ext = os.path.splitext(name)[1].lstrip(".")
      if not wanted or ext in wanted:
        file_paths.append(os.path.join(root, name))
  return file_paths

def img_transform(img):
    """Convert an RGB image (PIL Image or HxWx3 uint8 array) into a
    normalized CHW float32 tensor in [-1, 1].

    Equivalent to torchvision's ToTensor() followed by
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), but implemented with
    numpy/torch directly so torchvision is no longer imported inside
    the function on every call.
    """
    # HWC uint8 [0, 255] -> HWC float32 [0, 1] (ToTensor semantics).
    arr = np.asarray(img, dtype=np.float32) / 255.0
    # HWC -> CHW; .copy() gives from_numpy a contiguous buffer.
    tensor = torch.from_numpy(arr.transpose((2, 0, 1)).copy())
    # Per-channel mean 0.5, std 0.5 -> values in [-1, 1].
    return (tensor - 0.5) / 0.5

def process_img(path, imsize):
    """Load the image at *path*, resize it to (imsize, imsize) and
    return the normalized CHW tensor produced by img_transform.

    Args:
        path: Image file path.
        imsize: Target side length in pixels (square output).

    Returns:
        A float tensor of shape (3, imsize, imsize) in [-1, 1].
    """
    # `with` closes the underlying file handle, which the previous
    # bare Image.open(...) leaked; convert() materializes the pixels
    # first so the data survives the close.
    with Image.open(path) as raw:
        img = raw.convert("RGB")
    img = img.resize((imsize, imsize), Image.BILINEAR)
    return img_transform(img)


def predict(config):
    """Run parsing inference on every image under config.test_image_path
    and write the predicted label maps into config.test_pred_label_path,
    one output file per input (same file name).
    """
    # Build the network and move it to the target device.
    G = get_model(config.arch, pretrained=False).to(config.device)
    if config.parallel:
        G = nn.DataParallel(G)

    path_pre = config.test_pred_label_path

    # Load the checkpoint named after the pretrained-model step/tag.
    G.load_state_dict(torch.load(
            osp.join(config.model_save_path, config.arch,
                     "{}_G.pth".format(config.pretrained_model)),
            map_location=config.device))

    G.eval()
    paths = get_file_paths(config.test_image_path)
    with torch.no_grad():
        for p in paths:
            # (1, 3, imsize, imsize) batch, on the same device as G
            # (the old code left the tensor on CPU and would fail when
            # config.device is a GPU).
            img = process_img(p, config.imsize).unsqueeze(0).to(config.device)
            outputs = G(img)
            # BUG FIX: spatial dims of an NCHW batch are indices 2 and 3;
            # the previous [1:3] slice picked (channels, height).
            h, w = img.size()[2:]

            # These architectures return nested outputs; the last element
            # of the first entry holds the parsing logits.
            if config.arch == 'CE2P' or 'FaceParseNet' in config.arch:
                outputs = outputs[0][-1]

            outputs = F.interpolate(outputs, (h, w), mode='bilinear',
                                    align_corners=True)

            pred = generate_label_plain(outputs, config.imsize)

            cv2.imwrite(f'{path_pre}/{get_file_name(p)}', pred[0])
            
            
    
        
    
    
    



