import time
import numpy as np
import torch
import os

from transformers import BertTokenizer, BertModel
from transformers import AutoModel
from transformers import logging
logging.set_verbosity_error()

from my_request import MyRequest
from my_compute_F_min import compute_F_min
from my_single_layer_time import MySingleLayerTime
from customized_bert import BertEmbeddings, BertEncoder, BertLayer, BertPooler, BertConfig

from tools import ModelStage
from tools import bert_embeddings_forward, bert_encoder_forward, bert_pooler_forward
from tools import bert_layer_forward_i
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
import bert_tools

# warm up
# device = "cuda:0"
# x0 = torch.tensor([0])
# x1 = torch.tensor([1])
# x0.to(device)
# x2 = torch.matmul(x0, x1)
# torch.cuda.synchronize(device)
# torch.cuda.empty_cache()

# os.environ["http_proxy"]="http://172.18.219.43:7778"
# os.environ["https_proxy"]="http://172.18.219.43:7778"
# os.environ["all_proxy"]="http://172.18.219.43:7778"

# Build the serving request: target model name and latency SLO (seconds).
req = MyRequest("bert-base-cased", 0.02)
model_name = req.model
model_SLO = req.SLO

# Load the full pretrained BERT model (downloaded into a local cache dir).
model = AutoModel.from_pretrained(model_name, cache_dir="./transformers")
# Number of transformer encoder layers (12 for bert-base variants).
total_encoder_layer_num = len(model.encoder.layer)
# print(total_encoder_layer_num)

# Directory where the per-stage state dicts of the split model are stored.
model_path_dir = './splitted_model/' + model_name
if not os.path.exists(model_path_dir):
     os.makedirs(model_path_dir)

# model = model.cuda()
# NOTE(review): tokenizer is 'bert-base-uncased' while the model is
# 'bert-base-cased' — presumably acceptable for timing-only runs; confirm.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
input_text = ["Paris is the capital of [MASK].",]
# Tokenize and move the encoded inputs to GPU 0 once, up front.
encoded_input = tokenizer(input_text, return_tensors='pt').to("cuda:0")
# example = torch.rand(1, 10).long()
# example = example.cuda()
# temp = (inputs['input_ids'], inputs['attention_mask'], inputs['token_type_ids'], )
# print(temp)
# traced_script_module = torch.jit.trace(model, temp, strict=False)
# traced_script_module.save(model_path_dir + '/bert_gpu.pt')

# CPU-resident pipeline stages, in order: [embeddings, encoder layer 0..N-1, pooler].
model_stage_list_cpu = []

# NOTE(review): this calls the classmethod on a throwaway instance;
# BertConfig.from_pretrained(model_name) is the equivalent direct form.
config = BertConfig().from_pretrained(model_name)

# Fresh (randomly initialized) stage modules; weights are copied in below.
embeddings = BertEmbeddings(config)
encoder = BertEncoder(config)
pooler = BertPooler(config)

# Save the model's embedding layer.
torch.save(model.embeddings.state_dict(), model_path_dir + '/embeddings.pth')
# torch.save(model.embeddings.state_dict(), model_path_dir + '/embeddings.pt')
# Save the full encoder stack (all layers in one file).
torch.save(model.encoder.layer.state_dict(), model_path_dir + '/encoder.pth')
# Save each encoder layer individually.
for i in range(total_encoder_layer_num):
     torch.save(model.encoder.layer[i].state_dict(), model_path_dir + '/encoder_{}.pth'.format(i))
# Save the BertPooler layer (sentence-level feature vector representation).
torch.save(model.pooler.state_dict(), model_path_dir + '/pooler.pth')

layer_list = []
param_list = []

# Copy the pretrained weights into the standalone per-stage modules (on CPU),
# keeping the matching state dicts in param_list for later reuse experiments.
model = model.cpu()
embeddings.load_state_dict(model.embeddings.state_dict())
param_list.append(model.embeddings.state_dict())
model_stage_list_cpu.append(embeddings)
for i in range(total_encoder_layer_num):
     layer = BertLayer(config)
     layer.load_state_dict(model.encoder.layer[i].state_dict())
     param_list.append(model.encoder.layer[i].state_dict())
     model_stage_list_cpu.append(layer)
pooler.load_state_dict(model.pooler.state_dict())
param_list.append(model.pooler.state_dict())
model_stage_list_cpu.append(pooler)

device0 = "cuda:0"
device1 = "cuda:1"

# Profiling knobs: iteration count, total pipeline stages (embeddings +
# encoder layers + pooler), and the number of GPUs available to the solver.
test_time = 100
total_layer_num = 2 + total_encoder_layer_num
avai_gpu_num = 4

def get_single_layer_time(model_stage_list_cpu):
     """Profile average per-stage compute, offload (GPU0 -> GPU1), and
     CPU -> GPU load times of the split BERT pipeline.

     Args:
          model_stage_list_cpu: CPU-resident stage modules in pipeline order
               [embeddings, encoder layer 0..N-1, pooler].

     Returns:
          Tuple (single_layer_cal_time, single_layer_offload_time,
          single_layer_load_time): per-stage averages in seconds, excluding
          the first warm-up iteration.
     """
     global hidden_states, encoder_outputs, pooler_output

     total_cal_time = 0
     total_offload_time = 0
     total_load_time = 0
     # Local iteration count; intentionally shadows the module-level test_time.
     test_time = 10

     for i in range(test_time):
          # Move every stage onto GPU 0, timing the transfers (iteration 0 is
          # a warm-up and is excluded from all accumulators).
          model_stage_list_gpu = []
          for j, model_stage in enumerate(model_stage_list_cpu):
               s1 = time.time()
               model_stage_gpu = model_stage.to(device0)
               model_stage_gpu.eval()
               torch.cuda.synchronize(device0)
               if i != 0:
                    total_load_time += time.time() - s1
               model_stage_list_gpu.append(model_stage_gpu)

          # Embedding stage forward pass.
          s1 = time.time()
          embedding_output = bert_embeddings_forward(model_stage_list_gpu[0], encoded_input['input_ids'], encoded_input['token_type_ids'])
          torch.cuda.synchronize(device0)
          if i != 0:
               total_cal_time += time.time() - s1

          # Measure the cost of shipping the activation to the next GPU;
          # the copy itself is discarded.
          s1 = time.time()
          temp = embedding_output.to(device1)
          torch.cuda.synchronize(device0)
          torch.cuda.synchronize(device1)
          if i != 0:
               total_offload_time += time.time() - s1
          temp = None

          # Encoder layers, one stage at a time.
          hidden_states = embedding_output
          for j in range(total_encoder_layer_num):
               s1 = time.time()
               hidden_states = bert_layer_forward_i(model_stage_list_gpu[j + 1], 0, hidden_states, None, None)
               torch.cuda.synchronize(device0)
               if i != 0:
                    total_cal_time += time.time() - s1

               s1 = time.time()
               temp = hidden_states.to(device1)
               torch.cuda.synchronize(device0)
               torch.cuda.synchronize(device1)
               if i != 0:
                    total_offload_time += time.time() - s1
               temp = None

          encoder_outputs = BaseModelOutputWithPastAndCrossAttentions(
               last_hidden_state=hidden_states,
               past_key_values=None,
               hidden_states=None,
               attentions=None,
               cross_attentions=None,
          )
          # Pooler stage.  BUGFIX: the pooler is always the LAST stage, so
          # index it as [-1] instead of the hardcoded [13], which silently
          # assumed a 12-layer encoder.
          s1 = time.time()
          pooler_output = bert_pooler_forward(encoder_outputs, model_stage_list_gpu[-1])
          torch.cuda.synchronize(device0)
          if i != 0:
               total_cal_time += time.time() - s1

          # Return all stages to the CPU and free GPU memory for the next round.
          for model_stage in model_stage_list_gpu:
               model_stage = model_stage.cpu()
               model_stage = None
          model_stage_list_gpu = None

          torch.cuda.empty_cache()

     # Averages over the (test_time - 1) timed iterations; an offload happens
     # after every stage except the final one, hence total_layer_num - 1.
     single_layer_cal_time = total_cal_time / (total_layer_num * (test_time - 1))
     single_layer_offload_time = total_offload_time / ((total_layer_num - 1) * (test_time - 1))
     single_layer_load_time = total_load_time / (total_layer_num * (test_time - 1))

     return single_layer_cal_time, single_layer_offload_time, single_layer_load_time

# single_layer_cal_time, single_layer_offload_time, single_layer_load_time = get_single_layer_time(model_stage_list_cpu)
# single_layer_time = MySingleLayerTime(single_layer_cal_time, single_layer_offload_time, single_layer_load_time)
# print(single_layer_cal_time, single_layer_offload_time, single_layer_load_time)
# sol = compute_F_min(total_layer_num, model_SLO, avai_gpu_num, single_layer_time)
# print(sol)

def get_single_layer_time2(model_stage_list_cpu):
     """Profile average per-stage compute, offload (GPU0 -> GPU1), and
     CPU -> GPU load times using the bert_tools forward helpers.

     Unlike get_single_layer_time, this variant also offloads the attention
     masks / cross-attention tensors alongside the hidden states, and uses
     the module-level test_time for its iteration count.

     Args:
          model_stage_list_cpu: CPU-resident stage modules in pipeline order
               [embeddings, encoder layer 0..N-1, pooler].

     Returns:
          Tuple (single_layer_cal_time, single_layer_offload_time,
          single_layer_load_time): per-stage averages in seconds, excluding
          the first warm-up iteration.
     """
     global hidden_states, encoder_outputs, pooler_output

     total_cal_time = 0
     total_offload_time = 0
     total_load_time = 0
     global test_time

     for i in range(test_time):
          # Move every stage onto GPU 0, timing the transfers (iteration 0 is
          # a warm-up and is excluded from all accumulators).
          model_stage_list_gpu = []
          for j, model_stage in enumerate(model_stage_list_cpu):
               s1 = time.time()
               model_stage_gpu = model_stage.to(device0, non_blocking=True)
               model_stage_gpu.eval()
               torch.cuda.synchronize(device0)
               if i != 0:
                    total_load_time += time.time() - s1
               model_stage_list_gpu.append(model_stage_gpu)

          # Embedding stage: preprocess the inputs, run the embeddings module,
          # then prepare the encoder-side bookkeeping structure.
          s1 = time.time()
          preprocess_output = bert_tools.bert_preprocess(encoded_input, config, model_stage_list_gpu[0])
          embedding_output = model_stage_list_gpu[0](input_ids=preprocess_output.input_ids,
                                            position_ids=preprocess_output.position_ids,
                                            token_type_ids=preprocess_output.token_type_ids,
                                            inputs_embeds=preprocess_output.inputs_embeds,
                                            past_key_values_length=preprocess_output.past_key_values_length)
          encoder_preprocess_output = bert_tools.encoder_preprocess(preprocess_output.output_hidden_states,
                                                            preprocess_output.output_attentions,
                                                            config, preprocess_output.use_cache)
          encoder_preprocess_output.hidden_states = embedding_output

          torch.cuda.synchronize(device0)
          if i != 0:
               total_cal_time += time.time() - s1

          # Measure offload cost: hidden states plus every mask tensor a
          # downstream GPU would need.  Copies are discarded.
          # Idiom fix: compare against None with 'is not', not '!='.
          s1 = time.time()
          temp = encoder_preprocess_output.hidden_states.to(device1)
          temp = preprocess_output.extended_attention_mask.to(device1)
          if preprocess_output.encoder_hidden_states is not None:
               temp = preprocess_output.encoder_hidden_states.to(device1)
          if preprocess_output.encoder_extended_attention_mask is not None:
               temp = preprocess_output.encoder_extended_attention_mask.to(device1)
          torch.cuda.synchronize(device0)
          torch.cuda.synchronize(device1)
          if i != 0:
               total_offload_time += time.time() - s1
          temp = None

          # Encoder layers, one stage at a time.
          for j in range(total_encoder_layer_num):
               s1 = time.time()
               encoder_preprocess_output = bert_tools.bert_layer_forward_i(model_stage_list_gpu[j + 1], 0, encoder_preprocess_output,
                                                                preprocess_output, config)
               torch.cuda.synchronize(device0)
               if i != 0:
                    total_cal_time += time.time() - s1

               s1 = time.time()
               temp = encoder_preprocess_output.hidden_states.to(device1)
               temp = preprocess_output.extended_attention_mask.to(device1)
               if preprocess_output.encoder_hidden_states is not None:
                    temp = preprocess_output.encoder_hidden_states.to(device1)
               if preprocess_output.encoder_extended_attention_mask is not None:
                    temp = preprocess_output.encoder_extended_attention_mask.to(device1)
               torch.cuda.synchronize(device0)
               torch.cuda.synchronize(device1)
               if i != 0:
                    total_offload_time += time.time() - s1
               temp = None

          # Pooler stage (last module in the list).
          s1 = time.time()
          encoder_outputs = bert_tools.get_encoder_outputs(preprocess_output, encoder_preprocess_output)
          pooler_output = bert_tools.bert_pooler_forward(model_stage_list_gpu[-1], encoder_outputs, preprocess_output.return_dict)
          torch.cuda.synchronize(device0)
          if i != 0:
               total_cal_time += time.time() - s1

          # Offload cost of the final hidden states.
          s1 = time.time()
          encoder_outputs.last_hidden_state = encoder_outputs.last_hidden_state.to(device1)
          torch.cuda.synchronize(device0)
          torch.cuda.synchronize(device1)
          if i != 0:
               total_offload_time += time.time() - s1
          temp = None

          # Return all stages to the CPU and free GPU memory for the next round.
          for model_stage in model_stage_list_gpu:
               model_stage = model_stage.cpu()
               model_stage = None
          model_stage_list_gpu = None

          torch.cuda.empty_cache()

     # Averages over the (test_time - 1) timed iterations; an offload happens
     # after every stage except the final one, hence total_layer_num - 1.
     single_layer_cal_time = total_cal_time / (total_layer_num * (test_time - 1))
     single_layer_offload_time = total_offload_time / ((total_layer_num - 1) * (test_time - 1))
     single_layer_load_time = total_load_time / (total_layer_num * (test_time - 1))

     return single_layer_cal_time, single_layer_offload_time, single_layer_load_time

# Run the profiler and report the per-stage averages (MySingleLayerTime wraps
# them for the placement solver, cf. compute_F_min).
single_layer_cal_time, single_layer_offload_time, single_layer_load_time = get_single_layer_time2(model_stage_list_cpu)
single_layer_time = MySingleLayerTime(single_layer_cal_time, single_layer_offload_time, single_layer_load_time)
print(single_layer_cal_time, single_layer_offload_time, single_layer_load_time)

total_reuse_time = 0

# Rebuild the per-stage modules, this time resident on GPU 0, keeping the
# matching CPU state dicts in param_list for the reuse benchmark below.
layer_list = []
param_list = []

model = model.cpu()
embeddings.load_state_dict(model.embeddings.state_dict())
param_list.append(model.embeddings.state_dict())
layer_list.append(embeddings.to(device0))
for i in range(total_encoder_layer_num):
     layer = BertLayer(config)
     layer.load_state_dict(model.encoder.layer[i].state_dict())
     param_list.append(model.encoder.layer[i].state_dict())
     layer_list.append(layer.to(device0))
pooler.load_state_dict(model.pooler.state_dict())
param_list.append(model.pooler.state_dict())
layer_list.append(pooler.to(device0))

# Benchmark in-place weight reuse: overwrite each GPU-resident stage's
# weights via load_state_dict instead of re-allocating modules.  On even
# rounds the interior encoder layers receive mirrored weights
# (param_list[total - j - 1]) so the copy is never a no-op; the embedding
# (first) and pooler (last) stages always reload their own weights.
for round_idx in range(test_time):
     mirrored = round_idx % 2 == 0
     for stage_idx in range(total_layer_num):
          is_boundary = stage_idx == 0 or stage_idx == total_layer_num - 1
          if mirrored and not is_boundary:
               src_idx = total_layer_num - stage_idx - 1
          else:
               src_idx = stage_idx
          tick = time.time()
          layer_list[stage_idx].load_state_dict(param_list[src_idx])
          total_reuse_time += time.time() - tick

print("single_layer_reuse_time: ", total_reuse_time / test_time / total_layer_num)