import argparse
import json
import os
import random
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple

import torch
from torch.utils.data import DataLoader, Dataset

from transformers import AutoTokenizer
from trl.models.modeling_value_head import AutoModelForCausalLMWithValueHead
from trl.trainer import PPOConfig
from trl.trainer.ppo_trainer import PPOTrainer


def try_import_bertscore():
    """Return the ``bert_score.score`` callable, failing with install advice."""
    try:
        from bert_score import score as bertscore_score  # type: ignore
    except Exception as exc:
        raise ImportError(
            'bert-score is required. Please install via: pip install bert-score'
        ) from exc
    return bertscore_score


def actions_to_text(actions: List[int]) -> str:
    """Render a sequence of discrete action ids as a compact symbol string.

    Action ids 0-3 map to STOP/↑/←/→; any other id is silently dropped.
    """
    symbols = ('STOP', '↑', '←', '→')
    pieces = []
    for raw in actions:
        idx = int(raw)
        if 0 <= idx < len(symbols):
            pieces.append(symbols[idx])
    return ''.join(pieces)


class TrajTextDataset(Dataset):
    """Dataset pairing trajectory action sequences with reference instructions.

    Each root directory may contain an ``annotations.json`` file holding a
    list of items with ``instructions`` (str or list of str) and ``actions``
    (list of ints). Roots without the file are skipped silently.
    """

    def __init__(self, data_roots: List[str], max_samples: Optional[int] = None):
        """Load samples from every root, optionally capping the total count.

        Args:
            data_roots: Directories that may contain an ``annotations.json``.
            max_samples: If given (non-negative), stop loading once this many
                samples have been collected.
        """
        self.samples: List[Dict[str, Any]] = []
        for root in data_roots:
            anno_path = os.path.join(root, 'annotations.json')
            if not os.path.exists(anno_path):
                continue
            with open(anno_path, 'r', encoding='utf-8') as f:
                items = json.load(f)
            for it in items:
                # Stop early instead of loading everything and slicing after.
                if max_samples is not None and len(self.samples) >= max_samples:
                    return
                instructions = it.get('instructions', None)
                if instructions is None:
                    continue
                if not isinstance(instructions, list):
                    instructions = [instructions]
                actions = it.get('actions', [])
                if not actions:
                    continue
                # Drop the initial action (to align with SFT code) and append
                # STOP (id 0) so every trajectory terminates explicitly.
                actions = list(map(int, actions[1:])) + [0]
                self.samples.append({
                    'actions': actions,
                    'references': instructions,
                })

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """Return a prompt/reference pair for a PPO rollout."""
        item = self.samples[idx]
        act_text = actions_to_text(item['actions'])
        query = (
            'You are an autonomous navigation assistant. '
            'Given the following action sequence, describe the trajectory in natural language: '
            f'{act_text}'
        )
        return {
            'query': query,
            'references': item['references'],
        }


@dataclass
class ScriptArgs:
    """Configuration bundle for PPO fine-tuning with BERTScore rewards."""

    model_name_or_path: str  # HF model id or local checkpoint path
    output_dir: str  # directory receiving per-epoch checkpoints
    data_roots: str  # comma-separated dirs containing annotations.json
    batch_size: int = 4  # rollout batch size (PPOConfig.batch_size)
    mini_batch_size: int = 4  # PPO optimisation mini-batch size
    ppo_epochs: int = 4  # PPO epochs per rollout batch
    learning_rate: float = 5e-6
    target_kl: float = 0.1  # adaptive KL controller target
    init_kl_coef: float = 0.02  # initial KL penalty coefficient
    max_new_tokens: int = 64  # generation length cap per response
    seed: int = 42
    log_with: str = 'wandb'  # or 'tensorboard' / 'none'
    project_name: str = 'ppo-traj-bertscore'
    max_samples: Optional[int] = None  # cap on dataset size; None = use all


def build_ppo_config(args: ScriptArgs) -> PPOConfig:
    """Translate script arguments into a TRL ``PPOConfig``."""
    tracker = args.log_with
    if tracker in ['none', '']:
        # Disable experiment tracking entirely.
        tracker = None
    return PPOConfig(
        batch_size=args.batch_size,
        mini_batch_size=args.mini_batch_size,
        ppo_epochs=args.ppo_epochs,
        learning_rate=args.learning_rate,
        target_kl=args.target_kl,
        init_kl_coef=args.init_kl_coef,
        tracker_project_name=args.project_name,
        log_with=tracker,
        seed=args.seed,
    )


def compute_bertscore_max(preds: List[str], refs_list: List[List[str]]) -> List[float]:
    """Score each prediction against its references, keeping the best F1.

    All (prediction, reference) pairs are scored in a single ``bert_score``
    call: the previous per-sample loop invoked ``bert_score.score`` once per
    prediction, repeating model setup on every iteration.

    Args:
        preds: Generated texts, one per sample.
        refs_list: Per sample, an iterable of reference texts (a bare string
            is treated as a single reference).

    Returns:
        Per-sample maximum BERTScore F1 (rescaled against the baseline).
        A sample with no references keeps ``-inf``.
    """
    bertscore_score = try_import_bertscore()
    flat_preds: List[str] = []
    flat_refs: List[str] = []
    owner: List[int] = []
    for i, (pred, refs) in enumerate(zip(preds, refs_list)):
        if isinstance(refs, str):
            refs = [refs]
        for ref in refs:
            flat_preds.append(pred)
            flat_refs.append(ref)
            owner.append(i)
    scores = [float('-inf')] * len(preds)
    if flat_preds:
        _, _, F1 = bertscore_score(
            flat_preds, flat_refs, lang='en', rescale_with_baseline=True
        )
        # Reduce back to one score per sample: max over its references.
        for i, f1 in zip(owner, F1.tolist()):
            if f1 > scores[i]:
                scores[i] = f1
    return scores


def tokenize_query(tokenizer, texts: List[str]) -> List[torch.Tensor]:
    """Tokenize each query independently into a 1-D id tensor for TRL.

    The previous implementation padded the whole batch and then removed every
    token equal to ``pad_token_id``. Because this script sets
    ``pad_token = eos_token``, that filter also stripped genuine EOS/pad-valued
    tokens occurring inside a sequence. Tokenizing one text at a time requires
    no padding and therefore no lossy filtering.
    """
    return [
        tokenizer(text, return_tensors='pt')['input_ids'][0]
        for text in texts
    ]


def main():
    """Entry point: PPO fine-tuning of a causal LM with BERTScore rewards."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, required=True)
    parser.add_argument('--output_dir', type=str, required=True)
    parser.add_argument('--data_roots', type=str, required=True, help='Comma-separated paths containing annotations.json')
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--mini_batch_size', type=int, default=4)
    parser.add_argument('--ppo_epochs', type=int, default=4)
    parser.add_argument('--learning_rate', type=float, default=5e-6)
    parser.add_argument('--target_kl', type=float, default=0.1)
    parser.add_argument('--init_kl_coef', type=float, default=0.02)
    parser.add_argument('--max_new_tokens', type=int, default=64)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--log_with', type=str, default='wandb', choices=['wandb', 'tensorboard', 'none'])
    parser.add_argument('--project_name', type=str, default='ppo-traj-bertscore')
    parser.add_argument('--max_samples', type=int, default=None)
    args_ns = parser.parse_args()

    # Argument names match ScriptArgs fields one-to-one.
    sargs = ScriptArgs(**vars(args_ns))

    random.seed(sargs.seed)
    torch.manual_seed(sargs.seed)

    tokenizer = AutoTokenizer.from_pretrained(sargs.model_name_or_path, use_fast=True)
    if tokenizer.pad_token is None:
        # Causal LMs often ship without a pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLMWithValueHead.from_pretrained(sargs.model_name_or_path)
    model.config.use_cache = False

    cfg = build_ppo_config(sargs)
    trainer = PPOTrainer(
        config=cfg,
        model=model,
        tokenizer=tokenizer,
        dataset=None,
    )

    dataset = TrajTextDataset([p.strip() for p in sargs.data_roots.split(',')], max_samples=sargs.max_samples)

    def collate(items):
        # The default collate would transpose each sample's list of reference
        # strings across the batch (and crash on ragged reference lists).
        # Keep each sample's references grouped with its query instead.
        return {
            'query': [it['query'] for it in items],
            'references': [it['references'] for it in items],
        }

    # drop_last: TRL's PPO step requires the rollout batch to match
    # PPOConfig.batch_size, so a short final batch would fail.
    loader = DataLoader(
        dataset,
        batch_size=sargs.batch_size,
        shuffle=True,
        collate_fn=collate,
        drop_last=True,
    )

    os.makedirs(sargs.output_dir, exist_ok=True)

    for epoch in range(9999999):  # run until interrupted; control with steps externally if needed
        for batch in loader:
            queries: List[str] = batch['query']
            references_batch: List[List[str]] = batch['references']

            query_tensors = tokenize_query(tokenizer, queries)
            response_tensors = trainer.generate(
                query_tensors,
                batch_size=len(query_tensors),
                return_prompt=False,
                max_new_tokens=sargs.max_new_tokens,
                do_sample=True,
                top_k=50,
                top_p=0.9,
                temperature=0.8,
            )
            responses = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)

            rewards = compute_bertscore_max(responses, references_batch)
            # PPOTrainer.step expects per-sample scalar *tensors*, not floats.
            reward_tensors = [torch.tensor(r) for r in rewards]
            stats = trainer.step(query_tensors, response_tensors, reward_tensors)
            trainer.log_stats(stats, batch={"query": queries, "response": responses}, rewards=reward_tensors)

        # Save a checkpoint (model + tokenizer) after each pass over the data,
        # via the public API rather than the private _save_pretrained helper.
        save_path = os.path.join(sargs.output_dir, f'checkpoint-epoch-{epoch}')
        os.makedirs(save_path, exist_ok=True)
        trainer.save_pretrained(save_path)


# Allow importing this module without side effects; run training only as a script.
if __name__ == '__main__':
    main()

