import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import torch.nn.functional as F
from collections import deque
import numpy as np



class TradingEnv:
    """Single-asset trading environment driven by an external prediction model.

    State: ``np.ndarray`` of shape (2,) = [predicted up-probability, position ratio].
    Actions: 0/1/2 sell 100%/50%/30% of the position; 3/4/5 buy 30%/50%/100% of the
    affordable quantity; 6 hold.
    Reward: change in total portfolio value, scaled by the initial balance.
    """

    WINDOW = 30        # look-back rows fed to the prediction model
    MIN_CASH = 1000    # cash floor that is never spent on a buy
    FEE_RATE = 0.0002  # proportional transaction cost on both buys and sells

    def __init__(self, data, predict_model, features, init_balance=1e6):
        """Store the market data, model and feature list; start a fresh episode.

        Args:
            data: DataFrame with at least a 'close' column plus `features` columns.
            predict_model: object with ``predict(window_df) -> up-probability``.
            features: ordered list of column names passed to the model.
            init_balance: starting cash (default 1e6).
        """
        self.data = data.copy()
        self.predict_model = predict_model
        self.init_balance = torch.tensor(float(init_balance), dtype=torch.float32)
        self.features = features
        self.max_step = len(data)
        self._initial_setup()
        self.total_reward = 0

    def _initial_setup(self):
        """Reset runtime variables (balance/position kept as float32 tensors)."""
        # Start at WINDOW so a full look-back window is always available.
        self.current_step = self.WINDOW
        self.balance = self.init_balance.clone()
        self.position = torch.tensor(0.0, dtype=torch.float32)

    def reset(self):
        """Reset the episode (including cumulative reward) and return the initial state."""
        self._initial_setup()
        self.total_reward = 0
        return self.get_state()

    def get_state(self):
        """Build the state vector [predicted up-probability, position ratio].

        Raises:
            RuntimeError: wrapping any failure (bad step index, model error, ...).
        """
        try:
            # Bounds check: need a full window behind us and a valid current row.
            if self.current_step < self.WINDOW or self.current_step > len(self.data) - 1:
                raise IndexError(f"Invalid step: {self.current_step}/{len(self.data)}")

            # Slice the look-back window with features in declared order.
            window_raw = self.data[self.features].iloc[
                self.current_step - self.WINDOW:self.current_step].copy()
            assert isinstance(window_raw, pd.DataFrame), "输入必须是DataFrame"
            assert window_raw.columns.tolist() == self.features, "特征列顺序不匹配"

            # Model consumes the DataFrame directly; result may be scalar or array.
            prob_up = self.predict_model.predict(window_raw)
            prob_up = torch.as_tensor(prob_up, dtype=torch.float32).squeeze()

            current_close = torch.tensor(
                self.data['close'].iloc[self.current_step],
                dtype=torch.float32
            )
            position_value = self.position * current_close
            # 1e-8 avoids division by zero when balance and position are both 0.
            position_ratio = position_value / (self.balance + position_value + 1e-8)

            # reshape(1) guarantees both pieces are 1-D before concatenation.
            state_tensor = torch.cat([
                prob_up.reshape(1),
                position_ratio.reshape(1)
            ])
            return state_tensor.numpy()

        except Exception as e:
            raise RuntimeError(f"State build failed: {str(e)}") from e

    def step(self, action):
        """Execute `action`, advance one bar, and return (state, reward, done, info).

        On any trading error the step still advances time and returns a -0.1
        penalty with the error message in ``info['error']``.
        """
        balance_before = self.balance.clone()
        position_before = self.position.clone()
        state_before = self.get_state()
        current_price = torch.tensor(
            self.data['close'].iloc[self.current_step],
            dtype=torch.float32
        )
        prev_price = torch.tensor(
            self.data['close'].iloc[self.current_step - 1],
            dtype=torch.float32
        )

        try:
            if action in (0, 1, 2):  # sell: all / 50% / 30%
                sell_ratio = torch.tensor((1.0, 0.5, 0.3)[action], dtype=torch.float32)
                if self.position > 0:
                    sell_qty = self.position * sell_ratio
                    sell_value = sell_qty * current_price
                    transaction_cost = sell_value * self.FEE_RATE
                    self.balance += (sell_value - transaction_cost)
                    self.position -= sell_qty

            elif action in (3, 4, 5):  # buy: 30% / 50% / all
                buy_ratio = torch.tensor((0.3, 0.5, 1.0)[action - 3], dtype=torch.float32)
                available = self.balance - self.MIN_CASH  # keep a minimum cash cushion
                if available <= 0:
                    raise ValueError("Insufficient balance")

                max_qty = available // current_price  # whole units only
                buy_qty = max_qty * buy_ratio
                cost = buy_qty * current_price
                transaction_cost = cost * self.FEE_RATE
                self.balance -= (cost + transaction_cost)
                self.position += buy_qty

            elif action == 6:  # hold
                pass

            else:
                raise ValueError(f"Invalid action: {action}")

            self.current_step += 1
            done = self.current_step >= self.max_step

            # Reward: normalized change in portfolio value across the bar.
            portfolio_before = balance_before + position_before * prev_price
            portfolio_after = self.balance + self.position * current_price
            reward = (portfolio_after - portfolio_before) / self.init_balance
            self.total_reward += reward

            # At the terminal bar get_state() would be out of range, so return
            # the pre-action state instead.
            next_state = state_before if done else self.get_state()
            return next_state, reward.item(), done, {'status': 'success'}

        except Exception as e:
            # Penalty path: still advance time, and never crash while building
            # the next state (fall back to the pre-action state if needed).
            self.current_step += 1
            done = self.current_step >= self.max_step
            try:
                next_state = state_before if done else self.get_state()
            except Exception:
                next_state = state_before
            return next_state, -0.1, done, {'error': str(e)}

class Critic(nn.Module):
    """Value network: estimates the state value V(s)."""

    def __init__(self, state_dim):
        super().__init__()
        hidden = 64
        # Two ReLU hidden layers feeding a scalar value head; built as a list
        # and unpacked so the Sequential keeps the standard numeric submodule keys.
        layers = [
            nn.Linear(state_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 1),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, state):
        """Return V(state) with a trailing singleton dimension."""
        return self.net(state)


class Actor(nn.Module):
    """Policy network: maps a state to unnormalized action logits."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        hidden = 64
        # LayerNorm + Tanh hidden blocks, then a linear logit head.
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden),
            nn.LayerNorm(hidden),
            nn.Tanh(),
            nn.Linear(hidden, hidden),
            nn.LayerNorm(hidden),
            nn.Tanh(),
            nn.Linear(hidden, action_dim),
        )
        # Kaiming init (fan-in, tanh gain) on every linear layer; biases zeroed.
        for linear in filter(lambda m: isinstance(m, nn.Linear), self.net):
            nn.init.kaiming_normal_(linear.weight, mode='fan_in', nonlinearity='tanh')
            nn.init.constant_(linear.bias, 0.0)

    def forward(self, state):
        """Return raw action logits for `state` (no softmax applied)."""
        return self.net(state)


class A2CAgent:
    """Advantage Actor-Critic agent with a replay buffer.

    One Adam optimizer updates both networks; losses are
    ``MSE(V, TD-target)`` for the critic and
    ``-log_prob * advantage - entropy_coef * entropy`` for the actor.
    """

    def __init__(self, state_dim, action_dim):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = 0.95          # discount factor for TD targets
        self.entropy_coef = 0.01   # entropy-bonus weight (keeps the policy exploring)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Networks.
        self.actor = Actor(state_dim, action_dim).to(self.device)
        self.critic = Critic(state_dim).to(self.device)

        # Single optimizer spanning both networks.
        self.optimizer = optim.Adam([
            {'params': self.actor.parameters()},
            {'params': self.critic.parameters()}
        ], lr=0.001)

        # Experience replay.
        self.memory = deque(maxlen=2000)
        self.batch_size = 32

    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state, training=True):
        """Sample an action index from the current policy.

        ``training`` is kept for API compatibility; sampling is used either way.
        """
        state = torch.as_tensor(np.asarray(state), dtype=torch.float32,
                                device=self.device)
        with torch.no_grad():
            logits = self.actor(state)
        return Categorical(logits=logits).sample().item()

    def train(self):
        """One gradient step from a random minibatch; no-op until the buffer fills."""
        if len(self.memory) < self.batch_size:
            return

        # Snapshot to a list: positional indexing on a deque is O(n) per access.
        buffer = list(self.memory)
        idx = np.random.choice(len(buffer), self.batch_size, replace=False)
        states, actions, rewards, next_states, dones = zip(*(buffer[i] for i in idx))

        # np.asarray first: tensor construction from a sequence of ndarrays
        # via the per-element path is very slow.
        states = torch.as_tensor(np.asarray(states), dtype=torch.float32,
                                 device=self.device)
        actions = torch.as_tensor(np.asarray(actions), dtype=torch.long,
                                  device=self.device)
        rewards = torch.as_tensor(np.asarray(rewards), dtype=torch.float32,
                                  device=self.device)
        next_states = torch.as_tensor(np.asarray(next_states), dtype=torch.float32,
                                      device=self.device)
        dones = torch.as_tensor(np.asarray(dones), dtype=torch.float32,
                                device=self.device)

        # Critic loss: TD(0) target bootstrapped from the frozen next-state value.
        current_v = self.critic(states).squeeze()
        with torch.no_grad():
            next_v = self.critic(next_states).squeeze()
            target_values = rewards + self.gamma * next_v * (1 - dones)
        critic_loss = F.mse_loss(current_v, target_values)

        # Actor loss: policy gradient with advantage baseline plus entropy bonus.
        dist = Categorical(logits=self.actor(states))
        log_probs = dist.log_prob(actions)
        entropy = dist.entropy().mean()
        advantages = (target_values - current_v).detach()
        actor_loss = -(log_probs * advantages).mean() - self.entropy_coef * entropy

        self._apply_update(critic_loss + actor_loss)

    def train_batch(self, states, actions, rewards, next_states, dones):
        """Batch training entry point for caller-prepared tensors."""
        # TD target (next-state value detached: no gradient through the target).
        current_v = self.critic(states).squeeze()
        next_v = self.critic(next_states).squeeze().detach()
        td_target = rewards + self.gamma * next_v * (1 - dones)

        critic_loss = F.mse_loss(current_v, td_target)

        dist = Categorical(logits=self.actor(states))
        log_probs = dist.log_prob(actions)
        entropy = dist.entropy().mean()
        advantages = (td_target - current_v).detach()
        actor_loss = -(log_probs * advantages).mean() - self.entropy_coef * entropy

        self._apply_update(critic_loss + actor_loss)

    def _apply_update(self, total_loss):
        """Backprop `total_loss` and step the shared optimizer with grad clipping."""
        self.optimizer.zero_grad()
        total_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
        self.optimizer.step()

    def save(self, filename):
        """Persist actor, critic and optimizer state dicts to `filename`."""
        torch.save({
            'actor': self.actor.state_dict(),
            'critic': self.critic.state_dict(),
            'optimizer': self.optimizer.state_dict(),
        }, filename)

    def load(self, filename):
        """Restore a checkpoint.

        ``map_location=self.device`` lets a GPU-saved checkpoint load on a
        CPU-only host (and vice versa).
        """
        checkpoint = torch.load(filename, map_location=self.device)
        self.actor.load_state_dict(checkpoint['actor'])
        self.critic.load_state_dict(checkpoint['critic'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])

    def update_epsilon(self):
        """Decay the exploration rate if an epsilon-greedy schedule is attached.

        No-op unless all three of epsilon / min_epsilon / epsilon_decay exist,
        so a partially-configured agent never raises AttributeError here.
        """
        if all(hasattr(self, a) for a in ('epsilon', 'min_epsilon', 'epsilon_decay')):
            self.epsilon = max(self.min_epsilon, self.epsilon * self.epsilon_decay)




