#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：T1.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/3/23 16:14
https://mp.weixin.qq.com/s/nzB7fQhspr1W5JabdVAEvQ
'''
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import pygame
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque
import matplotlib.pyplot as plt


# Constants
SCREEN_WIDTH = 400    # window width in pixels
SCREEN_HEIGHT = 600   # window height in pixels
PIPE_WIDTH = 70       # width of each pipe rectangle in pixels
PIPE_HEIGHT = 400     # nominal pipe height; appears unused by the rect logic below
PIPE_GAP = 150        # vertical opening between the upper and lower pipe
BIRD_WIDTH = 40       # bird bounding-box width in pixels
BIRD_HEIGHT = 40      # bird bounding-box height in pixels
GRAVITY = 0.5         # downward acceleration added to velocity each frame
JUMP_STRENGTH = 10    # magnitude of the upward velocity set by a flap


WHITE = (255, 255, 255)  # RGB: background colour
BLACK = (0, 0, 0)        # RGB: bird and pipe colour

class Bird:
    """The player bird: a point mass pulled down by gravity, pushed up by flaps.

    BUG FIX: in the original, ``jump``/``move``/``get_rect`` were indented
    inside ``__init__`` (local functions, discarded on return), so calls like
    ``bird.jump()`` raised AttributeError. They are now proper methods.
    ``__init__`` also required ``x``/``y`` but was called as ``Bird()`` and
    ignored both; the parameters now have the original spawn point as
    defaults and are actually used.
    """

    def __init__(self, x=50, y=SCREEN_HEIGHT // 2):
        """Create a bird at (x, y) with zero vertical velocity."""
        self.x = x
        self.y = y
        self.velocity = 0

    def jump(self):
        """Flap: instantly set an upward (negative-y) velocity."""
        self.velocity = -JUMP_STRENGTH

    def move(self):
        """Advance one frame: apply gravity, then integrate velocity."""
        self.velocity += GRAVITY
        self.y += self.velocity

    def get_rect(self):
        """Axis-aligned bounding box, used for collisions and drawing."""
        return pygame.Rect(self.x, self.y, BIRD_WIDTH, BIRD_HEIGHT)


class Pipe:
    """A pipe column: an upper and a lower rectangle separated by PIPE_GAP."""

    def __init__(self, x):
        """Spawn a pipe column at horizontal position ``x`` with a random gap."""
        self.x = x
        # Top edge of the gap, kept at least 50 px from either screen edge.
        self.height = random.randint(50, SCREEN_HEIGHT - PIPE_GAP - 50)
        # Set once the bird clears this pipe, so it is scored only once.
        self.passed = False

    def move(self):
        """Scroll the pipe 5 px to the left (one frame of movement)."""
        self.x = self.x - 5

    def get_upper_rect(self):
        """Bounding rect of the pipe segment above the gap."""
        return pygame.Rect(self.x, 0, PIPE_WIDTH, self.height)

    def get_lower_rect(self):
        """Bounding rect of the pipe segment below the gap."""
        gap_bottom = self.height + PIPE_GAP
        return pygame.Rect(self.x, gap_bottom, PIPE_WIDTH, SCREEN_HEIGHT - gap_bottom)


class FlappyBirdEnv:
    """Minimal gym-style Flappy Bird environment (reset / step / render)."""

    def __init__(self):
        self.bird = Bird()
        self.pipes = [Pipe(SCREEN_WIDTH)]
        self.score = 0
        self.done = False

    def reset(self):
        """Restart the episode and return the initial observation."""
        self.__init__()
        return self.get_state()

    def step(self, action):
        """Advance one frame.

        action: 1 = flap, anything else = do nothing.
        Returns ``(state, reward, done, info)`` gym-style:
        +1 per surviving frame, -1000 on the terminal frame.
        """
        if action == 1:
            self.bird.jump()
        self.bird.move()

        # BUG FIX: the original appended to and removed from self.pipes while
        # iterating it directly, which can skip pipes mid-iteration. Iterate a
        # snapshot so mutation of the live list is safe.
        for pipe in list(self.pipes):
            pipe.move()
            if not pipe.passed and pipe.x < self.bird.x:
                pipe.passed = True
                self.score += 1
                # Spawn the next pipe as soon as this one is cleared.
                self.pipes.append(Pipe(SCREEN_WIDTH))
            if pipe.x + PIPE_WIDTH < 0:
                # Fully off-screen on the left: drop it.
                self.pipes.remove(pipe)

        self.done = self.check_collision()
        reward = -1000 if self.done else 1
        return self.get_state(), reward, self.done, {}

    def get_state(self):
        """Observation: [bird y, bird velocity, first pipe x, first pipe gap-top].

        NOTE(review): pipes[0] is the leftmost pipe, which may already be
        behind the bird — presumably intended as "nearest pipe"; verify.
        """
        return np.array([self.bird.y, self.bird.velocity, self.pipes[0].x, self.pipes[0].height], dtype=np.float32)

    def check_collision(self):
        """True if the bird hits any pipe or leaves the screen vertically."""
        bird_rect = self.bird.get_rect()
        for pipe in self.pipes:
            if bird_rect.colliderect(pipe.get_upper_rect()) or bird_rect.colliderect(pipe.get_lower_rect()):
                return True
        if self.bird.y < 0 or self.bird.y > SCREEN_HEIGHT:
            return True
        return False

    def render(self, screen):
        """Draw the current frame (bird + pipes) onto ``screen``."""
        screen.fill(WHITE)
        pygame.draw.rect(screen, BLACK, self.bird.get_rect())
        for pipe in self.pipes:
            pygame.draw.rect(screen, BLACK, pipe.get_upper_rect())
            pygame.draw.rect(screen, BLACK, pipe.get_lower_rect())
        pygame.display.update()

class DQN(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action."""

    def __init__(self, input_dim, output_dim):
        """Build the network: input_dim -> 128 -> 128 -> output_dim."""
        super(DQN, self).__init__()
        # Attribute names fc1/fc2/fc3 are part of the state_dict contract.
        self.fc1 = nn.Linear(input_dim, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, output_dim)

    def forward(self, x):
        """Return Q-values for each action given state (batch) ``x``."""
        hidden = x
        for layer in (self.fc1, self.fc2):
            hidden = torch.relu(layer(hidden))
        # Final layer is linear: Q-values are unbounded.
        return self.fc3(hidden)

def train_double_dqn(env, num_episodes=1000, gamma=0.99, epsilon_start=1.0, epsilon_end=0.1, epsilon_decay=500,
                     batch_size=64):
    """Train a Double-DQN agent on ``env`` and return the trained policy net.

    BUG FIX: in the original, the epsilon decay, reward logging, target-net
    sync, the plotting calls, and ``return policy_net`` were all indented
    inside the inner step loop, so the function returned after the very
    first environment step. Per-episode bookkeeping now runs per episode,
    and plotting/return happen once after all episodes.

    Args:
        env: environment exposing gym-style reset()/step(action).
        num_episodes: number of training episodes.
        gamma: discount factor.
        epsilon_start/epsilon_end/epsilon_decay: linear exploration schedule
            (decays over ``epsilon_decay`` episodes).
        batch_size: replay-buffer minibatch size.

    Returns:
        The trained policy network (a ``DQN`` instance).
    """
    input_dim = 4   # matches FlappyBirdEnv.get_state()
    output_dim = 2  # actions: 0 = do nothing, 1 = flap
    policy_net = DQN(input_dim, output_dim)
    target_net = DQN(input_dim, output_dim)
    target_net.load_state_dict(policy_net.state_dict())
    target_net.eval()

    optimizer = optim.Adam(policy_net.parameters())
    memory = deque(maxlen=10000)  # replay buffer
    epsilon = epsilon_start
    epsilon_decay_rate = (epsilon_start - epsilon_end) / epsilon_decay
    loss_fn = nn.MSELoss()
    rewards_history = []

    for episode in range(num_episodes):
        state = torch.FloatTensor(env.reset())
        total_reward = 0
        for t in range(1000):  # hard cap on episode length
            # Epsilon-greedy action selection.
            if random.random() < epsilon:
                action = random.choice(range(output_dim))
            else:
                with torch.no_grad():
                    action = policy_net(state).argmax().item()

            next_state, reward, done, _ = env.step(action)
            next_state = torch.FloatTensor(next_state)
            memory.append((state, action, reward, next_state, done))
            state = next_state
            total_reward += reward
            if done:
                break

            if len(memory) >= batch_size:
                batch = random.sample(memory, batch_size)
                states, actions, rewards, next_states, dones = zip(*batch)
                states = torch.stack(states)
                actions = torch.tensor(actions)
                rewards = torch.tensor(rewards, dtype=torch.float32)
                next_states = torch.stack(next_states)
                dones = torch.tensor(dones, dtype=torch.uint8)

                q_values = policy_net(states).gather(1, actions.unsqueeze(1)).squeeze(1)
                # Double DQN: policy net picks the action, target net scores it.
                # no_grad: targets are constants; don't build a graph through
                # the target network.
                with torch.no_grad():
                    next_actions = policy_net(next_states).argmax(dim=1)
                    next_q_values = target_net(next_states).gather(1, next_actions.unsqueeze(1)).squeeze(1)
                    target_q_values = rewards + (gamma * next_q_values * (1 - dones))

                loss = loss_fn(q_values, target_q_values)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

        # Per-episode bookkeeping (was wrongly inside the step loop).
        epsilon = max(epsilon_end, epsilon - epsilon_decay_rate)
        rewards_history.append(total_reward)
        if episode % 10 == 0:
            target_net.load_state_dict(policy_net.state_dict())
        print(f"Episode {episode}, Total Reward: {total_reward}, Epsilon: {epsilon}")

    # Plot the learning curve once, after training completes.
    plt.plot(rewards_history)
    plt.xlabel('Episode')
    plt.ylabel('Total Reward')
    plt.show()
    return policy_net

def test_model(env, model):
    """Play one greedy (no-exploration) episode with ``model``, rendering via pygame.

    Runs until the episode ends or the window is closed, then prints the
    total reward.
    """
    pygame.init()
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    clock = pygame.time.Clock()
    state = torch.FloatTensor(env.reset())
    total_reward = 0
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return

        # Greedy action from the trained policy; no graph needed at test time.
        with torch.no_grad():
            action = model(state).argmax().item()
        next_state, reward, done, _ = env.step(action)
        state = torch.FloatTensor(next_state)
        total_reward += reward
        env.render(screen)
        clock.tick(30)  # cap at 30 FPS
        if done:
            break
    print(f"Total Reward: {total_reward}")
    pygame.quit()


if __name__ == "__main__":
    # BUG FIX: this driver code was indented inside test_model after
    # pygame.quit(), so it could never run; it belongs at module level,
    # guarded so importing this file doesn't start training.
    env = FlappyBirdEnv()
    trained_model = train_double_dqn(env)
    test_model(env, trained_model)