import os
os.environ["MKL_NUM_THREADS"] = "1"  # noqa F402
os.environ["NUMEXPR_NUM_THREADS"] = "1"  # noqa F402
os.environ["OMP_NUM_THREADS"] = "1"  # noqa F402

import numpy as np

import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import torchvision.models as models
from torch.nn.init import kaiming_normal_, constant_
from .cost_volume_util import FeaturePyramidExtractor, WarpingLayer,OpticalFlowEstimator
from .util import conv, predict_flow, deconv, crop_like
__all__ = [
    'flow_cost_volume'
]

class FlowCostVolum(nn.Module):
    """Optical-flow refinement network built on a matching cost volume.

    A ResNet-18 encoder extracts features from both images of a pair.  A set
    of candidate flows is generated around a coarse flow estimate, the second
    image's features are warped by each candidate, and the per-candidate
    matching costs form a cost volume that is fed (together with the first
    image's features) to an ``OpticalFlowEstimator`` head.

    Expected keys in ``args``:
        flow_range:  displacement range used to step the candidate magnitudes.
        angle_bins:  number of candidate flow directions.
        length_bins: number of candidate flow magnitudes.
        device:      device for the flow-estimator head.
    """

    def __init__(self, args):
        super(FlowCostVolum, self).__init__()
        self.flow_range = args["flow_range"]
        self.angle_bins = args["angle_bins"]
        self.length_bins = args["length_bins"]
        # Replace zero-cost entries (masked border pixels) with the per-pixel
        # maximum cost so they can never win the matching — see match_feature.
        self.set_missing_to_max = True
        self.batchNorm = False
        self.args = args
        self.initial_model()
        self.warping_layer = WarpingLayer(args)

    def initial_model(self):
        """Build the ResNet-18 encoder stages and the flow-estimator head."""
        resnets = {18: models.resnet18,
                   34: models.resnet34,
                   50: models.resnet50,
                   101: models.resnet101,
                   152: models.resnet152}
        encoder = resnets[18](pretrained=True)
        # Encoder stages reused as feature extractors.
        self.layer0 = nn.Sequential(encoder.conv1, encoder.bn1, encoder.relu)
        self.layer1 = nn.Sequential(encoder.maxpool, encoder.layer1)
        self.layer2 = encoder.layer2
        self.layer3 = encoder.layer3
        self.layer4 = encoder.layer4

        # Head input channels: one cost slice per candidate flow
        # (angle_bins * length_bins shifted candidates + 1 for the original
        # prediction) concatenated with the 64-channel stage-1 feature map.
        out_dims = (self.angle_bins * self.length_bins) + 1 + 64
        self.flow_estimators = OpticalFlowEstimator(self.args, out_dims).to(self.args["device"])

    def feature_extraction(self, image, return_all_feats=False):
        """Run the ResNet encoder on ``image``.

        Args:
            image: (B, 3, H, W) input batch.
            return_all_feats: when True, return features from all four
                encoder stages; otherwise only the stage-1 features.

        Returns:
            Stage-1 feature map, or a list of the four stage feature maps.
        """
        feature0 = self.layer1(self.layer0(image))
        if not return_all_feats:
            # Skip the deeper stages entirely — their outputs would be
            # discarded (and, in train mode, would needlessly update the
            # unused layers' batch-norm running statistics).
            return feature0
        feature1 = self.layer2(feature0)
        feature2 = self.layer3(feature1)
        feature3 = self.layer4(feature2)
        return [feature0, feature1, feature2, feature3]

    def forward(self, flownet_src_pred, image):
        """Refine a coarse flow prediction for an image pair.

        Args:
            flownet_src_pred: sequence whose first element is the coarse
                (B, 2, H, W) flow prediction.
            image: (B, 6, H, W) tensor — the two RGB images stacked along
                the channel dimension.

        Returns:
            Single-element list containing the flow-estimator output.
        """
        x1_raw = image[:, :3].contiguous()
        x2_raw = image[:, 3:].contiguous()
        # Image-1 features stay outside no_grad: gradients flow to the
        # encoder through this branch only.
        img1_feature_resnet = self.feature_extraction(x1_raw, return_all_feats=False)
        with torch.no_grad():
            # Candidate generation and cost-volume construction are not
            # trained through.
            candidate_flow = self.compute_flow_candidate(flownet_src_pred[0])
            img2_feature_resnet = self.feature_extraction(x2_raw, return_all_feats=False)
            cost_volume = self.match_feature(img1_feature_resnet, img2_feature_resnet, candidate_flow)
        post_matching_feature = torch.cat([img1_feature_resnet, cost_volume], 1)
        return [self.flow_estimators(post_matching_feature)]

    def conv_reduce(self, post_matching_feature):
        """Build a pyramid of progressively downsampled feature maps.

        NOTE(review): this depends on ``self.conv1`` .. ``self.conv5_5``,
        which ``initial_model`` does not currently create — calling it raises
        ``AttributeError``.  Kept for the disabled multi-level decoder path.
        """
        feature = [self.conv1_1(self.conv1(post_matching_feature))]
        feature.append(self.conv2_2(self.conv2(feature[-1])))
        feature.append(self.conv3_3(self.conv3(feature[-1])))
        feature.append(self.conv4_4(self.conv4(feature[-1])))
        feature.append(self.conv5_5(self.conv5(feature[-1])))
        return feature

    def match_feature(self, img1_feature, img2_feature, candidate_flow):
        """Compute the matching cost volume between two feature maps.

        Args:
            img1_feature: (B, C, H, W) features of image 1.
            img2_feature: (B, C, H, W) features of image 2.
            candidate_flow: (B, 2, K, H, W) per-pixel candidate flows.

        Returns:
            (B, K, H, W) cost volume — mean absolute feature difference
            after warping image-2 features by each candidate flow.
        """
        batch_cost_volume = []
        bs, ch, candi, h, w = candidate_flow.shape
        for batch_idx in range(bs):
            feature1 = img1_feature[batch_idx, :]
            # One copy of the image-2 feature map per flow candidate.
            feature2 = img2_feature[batch_idx, :].unsqueeze(0).repeat(candi, 1, 1, 1)
            flow = candidate_flow[batch_idx, :].permute(1, 0, 2, 3).type_as(feature2)
            feature2_warp = self.warping_layer(feature2, flow)
            diffs = torch.abs(feature2_warp - feature1).mean(1)
            # Zero out a 2-pixel border where warped values are unreliable.
            edge_mask = torch.zeros_like(diffs)
            edge_mask[:, 2:-2, 2:-2] = 1.0
            cost_volume = diffs * edge_mask
            # Exactly-zero entries (the masked border — or, incidentally, a
            # perfect feature match) are treated as missing ...
            missing_val_mask = (cost_volume == 0).float()
            if self.set_missing_to_max:
                # ... and replaced by the worst cost over all candidates so
                # they never look like a good match.
                cost_volume = cost_volume * (1 - missing_val_mask) + \
                              cost_volume.max(0)[0].unsqueeze(0) * missing_val_mask
            batch_cost_volume.append(cost_volume)
        return torch.stack(batch_cost_volume, 0)

    def compute_flow_candidate(self, flownet_src_pred):
        """Expand a coarse flow into a grid of candidate flows.

        Candidates are the coarse flow shifted by every combination of the
        magnitude and direction bins; the unshifted prediction is prepended
        as candidate 0.

        Args:
            flownet_src_pred: (B, 2, H, W) coarse flow.

        Returns:
            (B, 2, K+1, H, W) candidate flows, where
            K = len(magnitudes) * angle_bins.
        """
        step = self.flow_range / self.length_bins
        # NOTE(review): the magnitudes stop just above 1 regardless of
        # flow_range (arange upper bound is 1 + step) — confirm intended.
        length_list = list(np.arange(step, 1 + step, step))
        angle_list = torch.from_numpy(np.arange(0, 2 * math.pi, 2 * math.pi / self.angle_bins))
        sin_angle = torch.sin(angle_list)
        cos_angle = torch.cos(angle_list)
        x_bins = []
        y_bins = []
        for lens in length_list:
            x_bins.append(lens * sin_angle)
            y_bins.append(lens * cos_angle)
        bs, c, h, w = flownet_src_pred.shape
        # (K, H, W) per-pixel offset grids, identical at every pixel.
        x_bins = torch.cat(x_bins).to(flownet_src_pred.device).unsqueeze(0).repeat(h, w, 1).permute(2, 0, 1)
        y_bins = torch.cat(y_bins).to(flownet_src_pred.device).unsqueeze(0).repeat(h, w, 1).permute(2, 0, 1)
        candidate_flow = []
        for i in range(bs):
            flow = flownet_src_pred[i, :]
            x_flow = (flow[0, :] + x_bins).unsqueeze(0)
            y_flow = (flow[1, :] + y_bins).unsqueeze(0)
            candidate_flow.append(torch.cat([x_flow, y_flow], dim=0).unsqueeze(0))
        candidate_flow = torch.cat(candidate_flow, dim=0)
        # Prepend the original prediction as candidate 0.
        return torch.cat([flownet_src_pred.unsqueeze(2), candidate_flow], dim=2)

def flow_cost_volume(args, data=None):
    """Construct a :class:`FlowCostVolum` model, optionally restoring weights.

    Args:
        args: configuration dict forwarded to ``FlowCostVolum`` (must contain
            ``flow_range``, ``angle_bins``, ``length_bins`` and ``device``).
        data: optional checkpoint dict with a ``'state_dict'`` entry; when
            given, the weights are loaded into the new model.

    Returns:
        The constructed (and possibly restored) model.
    """
    model = FlowCostVolum(args)
    if data is not None:
        model.load_state_dict(data['state_dict'])
    return model