# Configure a single-process "distributed" environment (rank 0 of world
# size 1) before torch/colossalai are imported, since they read these
# variables at initialization time.
import os

_DIST_ENV = {
    'RANK': '0',
    'LOCAL_RANK': '0',
    'WORLD_SIZE': '1',
    'MASTER_ADDR': '127.0.0.1',
    'MASTER_PORT': '9999',
}
os.environ.update(_DIST_ENV)

import sys
import json
import codecs
from chatgpt.nn import GPTActor, GPTCritic, RewardModel
from chatgpt.trainer import PPOTrainer
from chatgpt.trainer.strategies import ColossalAIStrategy
from copy import deepcopy
import torch
from colossalai.nn.optimizer import CPUAdam, HybridAdam

# Path to the NaturalConv dialogue dataset: a JSON array of dialogue
# objects, each carrying a 'content' list of utterance strings.
json_path = '/mnt/smbmount/Dell-_dell7590_root/workspace/NLP datasets/NaturalConv_Release_20210318/dialog_release.json'

# FIX: the original used codecs.open() and never closed the handle; a
# context manager with json.load() closes the file deterministically.
with open(json_path, 'r', encoding='utf-8') as f:
    dialog_list = json.load(f)

# One prompt per dialogue: all of its utterances joined into one string.
prompts = [''.join(obj['content']) for obj in dialog_list]
# FIX: the original `prompts = torch.Tensor(prompts)` is a hard bug —
# torch.Tensor cannot be built from a list of strings and raises
# TypeError. Keep the prompts as a plain list of strings, which is what
# the library's PPOTrainer.fit presumably expects (as in its example
# scripts) — TODO confirm against the installed chatgpt/coati version.

# ZeRO stage-3 training strategy with tensors placed on GPU
# (placement_policy='cuda').
strategy = ColossalAIStrategy(stage=3, placement_policy='cuda')

# Build all four PPO models inside the strategy's init context so the
# strategy can manage parameter allocation/sharding as they are created.
with strategy.model_init_context():
    actor = GPTActor().cuda()    # policy model being optimized
    critic = GPTCritic().cuda()  # value-estimation model
    # Deep-copied snapshot of the actor; presumably the frozen reference
    # policy for PPO's KL term — confirm against PPOTrainer's contract.
    initial_model = deepcopy(actor).cuda()
    # Reward model wraps a deep copy of the critic's backbone network.
    reward_model = RewardModel(deepcopy(critic.model)).cuda()


# Wire the PPO trainer: CPUAdam keeps the actor's optimizer state on
# CPU, HybridAdam splits the critic's state between CPU and GPU — both
# are ColossalAI optimizers meant to pair with the ZeRO strategy.
trainer = PPOTrainer(
    strategy, actor, critic, reward_model, initial_model,
    CPUAdam(actor.parameters()),
    HybridAdam(critic.parameters()),
)
# Run the PPO loop over the prompts built above. NOTE(review): fit() is
# presumably expecting an iterable of prompt strings — verify, since the
# code above converts `prompts` with torch.Tensor before this call.
trainer.fit(prompts)