import torch
import torch.utils.data
from torch.utils.data.dataloader import DataLoader
from torch.nn.parallel import DistributedDataParallel as Ddp
from ._time_window import TimeWindow
from ._count_window import CountWindow
from contextlib import nullcontext
import time
from ._async_ck import AsyncCKProcess
from ._report_train_speed import AsyncReportProcess
import multiprocessing
import io
import logging

logger = logging.getLogger(__name__)


class Trainer(object):
    """Windowed distributed trainer.

    Pulls micro-batches from a time- or count-based window over
    ``data_loader``, accumulates gradients until the window triggers,
    then steps ``optimizer``.  Rank 0 may additionally push serialized
    model checkpoints and throughput samples to background processes
    that forward them to Redis.
    """

    def __init__(
        self,
        model: Ddp,
        loss_fn,
        optimizer: torch.optim.Optimizer,
        data_loader: torch.utils.data.DataLoader,
        window_type: str,
        window_size: int,
        window_threshold: int,
        rank: int,
        val_loader: DataLoader = None,
        ck_interval: int = -1,
        ck_key: str = "model_ck",
        report_speed_interval: int = -1,
        speed_key: str = "ml_sp",
        redis_addr: str = "",
        redis_port: int = 0,
        log_interval: int = 1,
        pre_train_batch: int = 0,
    ):
        """
        Args:
            model: DDP-wrapped model to train.
            loss_fn: callable mapping (prediction, target) to a scalar loss.
            optimizer: optimizer over ``model``'s parameters.
            data_loader: source of training batches fed to the window.
            window_type: selects the window kind by prefix match —
                anything starting with "time" uses ``TimeWindow``,
                anything starting with "count" uses ``CountWindow``.
            window_size: passed through to the window implementation
                (exact semantics defined by TimeWindow/CountWindow,
                not visible here).
            window_threshold: trigger threshold passed to the window.
            rank: this worker's distributed rank; rank 0 owns the
                checkpoint and speed-report side processes.
            val_loader: optional validation loader (stored, unused here).
            ck_interval: seconds between checkpoints; <= 0 disables.
            ck_key: Redis key the checkpoint process writes to.
            report_speed_interval: seconds between throughput reports;
                <= 0 disables.
            speed_key: Redis key the speed-report process writes to.
            redis_addr: Redis host for the side processes.
            redis_port: Redis port for the side processes.
            log_interval: seconds between progress log lines.
            pre_train_batch: stored for callers; not used in this class.
        """
        super().__init__()
        self.model = model
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.data_loader = data_loader
        self.val_loader = val_loader
        self.window_type = window_type
        self.window_size = window_size
        self.window_threshold = window_threshold
        self.rank = rank
        self.ck_interval = ck_interval
        self.ck_key = ck_key
        self.report_speed_interval = report_speed_interval
        self.speed_key = speed_key
        self.ck_process = None
        self.redis_addr = redis_addr
        self.redis_port = redis_port
        self.pre_train_batch = pre_train_batch
        self.log_interval = log_interval

        # Unknown window types leave the trainer inert: no window is
        # polled and train() performs no work (previously the attribute
        # was simply never set, causing a late AttributeError).
        self.window = None
        if window_type.startswith("time"):
            self.window = TimeWindow(data_loader, window_size, window_threshold)
        elif window_type.startswith("count"):
            self.window = CountWindow(data_loader, window_size, window_threshold)

    def train(self):
        """Run the full training loop, timing it end to end."""
        self.prepare_train()
        start_time = int(time.time() * 1000)
        # Prefix match, consistent with the dispatch in __init__ — an
        # exact "==" here used to silently skip training for values such
        # as "time_window" that __init__ accepted.
        if self.window_type.startswith("time"):
            self.__time_window_train()
        elif self.window_type.startswith("count"):
            self.__count_window_train()
        end_time = int(time.time() * 1000)
        if self.rank == 0:
            logger.info("finish train: time cost: %d ms", end_time - start_time)
        self.finish_train()

    def prepare_train(self):
        """Start rank-0 side processes for checkpointing / speed reporting.

        Each enabled feature gets its own bounded queue and worker
        process; items put on a queue are consumed asynchronously so the
        training loop is not blocked on Redis I/O.
        """
        if self.rank == 0:
            if self.ck_interval > 0:
                self.queue = multiprocessing.Queue(maxsize=1024)
                self.ck_process = AsyncCKProcess(
                    self.queue,
                    self.redis_addr,
                    self.redis_port,
                    ck_key=self.ck_key,
                )
                self.ck_process.start()
            if self.report_speed_interval > 0:
                self.sp_queue = multiprocessing.Queue(maxsize=1024)
                self.sp_process = AsyncReportProcess(
                    self.sp_queue,
                    self.redis_addr,
                    self.redis_port,
                    self.speed_key,
                )
                self.sp_process.start()

    def finish_train(self):
        """Shut down rank-0 side processes.

        ``None`` is the poison-pill sentinel telling each worker process
        to drain and exit; we then join to avoid leaking processes.
        """
        if self.rank == 0:
            if self.ck_interval > 0:
                self.queue.put(None)
                self.ck_process.join()
            if self.report_speed_interval > 0:
                self.sp_queue.put(None)
                self.sp_process.join()

    def _maybe_checkpoint(self, now: int, last_ck_time: int) -> int:
        """Enqueue a serialized checkpoint if the interval elapsed.

        Returns the updated "last checkpoint" timestamp.  Resetting it
        here fixes a bug where checkpoints fired on every iteration once
        the first interval had passed.
        """
        if (
            self.rank == 0
            and self.ck_interval > 0
            and now - last_ck_time >= self.ck_interval
        ):
            buffer = io.BytesIO()
            torch.save(self.model.state_dict(), buffer)
            self.queue.put(buffer.getvalue())
            return now
        return last_ck_time

    def _maybe_report_speed(
        self, now: int, last_report_time: int, acc_examples: int, last_report_examples: int
    ):
        """Report examples/sec since the last report if the interval elapsed.

        Returns the (possibly updated) ``(last_report_time,
        last_report_examples)`` pair.  The guard guarantees
        ``now - last_report_time > 0``, so the division is safe.
        """
        if (
            self.rank == 0
            and self.report_speed_interval > 0
            and now - last_report_time >= self.report_speed_interval
        ):
            self.sp_queue.put(
                (acc_examples - last_report_examples) / (now - last_report_time)
            )
            return now, acc_examples
        return last_report_time, last_report_examples

    def __time_window_train(self):
        """Train with a time-based window.

        Losses are summed across the micro-batches of a window and
        averaged when the window triggers, so one backward/step happens
        per window.
        """
        last_log_time = int(time.time())
        last_ck_time = int(time.time())
        last_report_time = int(time.time())
        last_report_examples = 0

        window_data = self.window.poll()
        loss = None
        acc_step = 0  # micro-batches accumulated in the current window
        acc_examples = 0
        while window_data:
            pred = self.model(window_data.X)
            if loss is None:
                loss = self.loss_fn(pred, window_data.y)
            else:
                loss += self.loss_fn(pred, window_data.y)

            acc_step += 1
            acc_examples += len(window_data.X)
            if window_data.trigger_window:
                # Average the summed loss over this window's batch count.
                loss /= acc_step
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
                loss = None
                # Restart the per-window count — previously acc_step kept
                # growing, so later windows were divided by the cumulative
                # batch count and their gradients shrank toward zero.
                acc_step = 0
            now = int(time.time())
            if now - last_log_time >= self.log_interval:
                logger.info("acc_examples: %d", acc_examples)
                last_log_time = now
            last_ck_time = self._maybe_checkpoint(now, last_ck_time)
            last_report_time, last_report_examples = self._maybe_report_speed(
                now, last_report_time, acc_examples, last_report_examples
            )

            window_data = self.window.poll()

    def __count_window_train(self):
        """Train with a count-based window.

        Every micro-batch backpropagates immediately; DDP gradient
        all-reduce is suppressed via ``no_sync`` until the batch that
        triggers the window, at which point the optimizer steps.
        """
        last_log_time = int(time.time())
        last_ck_time = int(time.time())
        last_report_time = int(time.time())
        last_report_examples = 0

        acc_examples = 0
        # Expected micro-batches per window; each per-batch loss is scaled
        # by 1/acc_step so the summed gradients average over the window.
        acc_step = self.window_size / self.window_threshold
        window_data = self.window.poll()
        while window_data:
            # Only synchronize gradients on the batch that closes the
            # window; intermediate batches skip the DDP all-reduce.
            ctx = nullcontext if window_data.trigger_window else self.model.no_sync
            with ctx():
                pred = self.model(window_data.X)
                loss = self.loss_fn(pred, window_data.y)
                loss /= acc_step
                loss.backward()
            acc_examples += len(window_data.X)
            if window_data.trigger_window:
                self.optimizer.step()
                self.optimizer.zero_grad()

            now = int(time.time())
            if self.rank == 0 and now - last_log_time >= self.log_interval:
                logger.info(
                    "worker: %d, trained examples %d", self.rank, acc_examples
                )
                last_log_time = now

            last_ck_time = self._maybe_checkpoint(now, last_ck_time)
            last_report_time, last_report_examples = self._maybe_report_speed(
                now, last_report_time, acc_examples, last_report_examples
            )

            window_data = self.window.poll()
