/**
 * Epsilon-greedy actor/critic agent built on TensorFlow.js (expects the
 * global `tf` to be in scope).
 *
 * NOTE(review): despite the name, this is not true PPO — there is no
 * clipped surrogate objective. The actor is fit to the taken actions with
 * binary cross-entropy and the critic is regressed on one-step TD targets.
 */
export class PPOAgent {
    /**
     * @param {number} stateSize - Dimensionality of the flat state vector.
     * @param {number} actionSize - Number of discrete actions (one-hot width).
     */
    constructor(stateSize, actionSize) {
        this.stateSize = stateSize;
        this.actionSize = actionSize;

        this.actor = this.createActor();
        this.critic = this.createCritic();

        // Discount factor for one-step TD targets.
        this.gamma = 0.95;
        // Exploration rate, multiplicatively decayed per getAction() call
        // until it reaches epsilon_min.
        this.epsilon = 0.5;
        this.epsilon_decay = 0.9995;
        this.epsilon_min = 0.1;
        this.batchSize = 32;
    }

    /**
     * Builds the policy network: stateSize -> 128 -> 64 -> actionSize.
     * Sigmoid outputs + binary cross-entropy treat each action unit as an
     * independent probability (only index 0 is consulted in getAction).
     * @returns {tf.Sequential} Compiled actor model.
     */
    createActor() {
        const model = tf.sequential();
        model.add(tf.layers.dense({
            units: 128,
            activation: 'relu',
            inputShape: [this.stateSize]
        }));
        model.add(tf.layers.dense({
            units: 64,
            activation: 'relu'
        }));
        model.add(tf.layers.dense({
            units: this.actionSize,
            activation: 'sigmoid'
        }));

        model.compile({
            optimizer: tf.train.adam(0.0005),
            loss: 'binaryCrossentropy'
        });

        return model;
    }

    /**
     * Builds the value network: stateSize -> 64 -> 64 -> 1 (linear),
     * trained with MSE against the TD targets computed in train().
     * @returns {tf.Sequential} Compiled critic model.
     */
    createCritic() {
        const model = tf.sequential();
        model.add(tf.layers.dense({
            units: 64,
            activation: 'relu',
            inputShape: [this.stateSize]
        }));
        model.add(tf.layers.dense({
            units: 64,
            activation: 'relu'
        }));
        model.add(tf.layers.dense({
            units: 1,
            activation: 'linear'
        }));

        model.compile({
            optimizer: tf.train.adam(0.001),
            loss: 'meanSquaredError'
        });

        return model;
    }

    /**
     * Selects a discrete action (0 or 1) for the given state.
     *
     * With probability epsilon, explores (heavily biased: 95% action 0);
     * otherwise queries the actor and thresholds its first output at 0.7.
     * Epsilon decays once per call. On any error, falls back to action 0.
     *
     * @param {number[]} state - Flat state vector of length stateSize.
     * @returns {Promise<number>} 0 or 1.
     */
    async getAction(state) {
        try {
            let action;
            if (Math.random() < this.epsilon) {
                // Exploration branch: no tensor allocation needed here.
                action = Math.random() < 0.95 ? 0 : 1;
            } else {
                const stateTensor = tf.tensor2d([state]);
                try {
                    const actionProbs = this.actor.predict(stateTensor);
                    try {
                        const actionData = await actionProbs.data();
                        action = actionData[0] > 0.7 ? 1 : 0;
                    } finally {
                        actionProbs.dispose();
                    }
                } finally {
                    stateTensor.dispose();
                }
            }

            // Decay exploration once per decision (single point of decay
            // instead of the duplicated per-branch logic).
            if (this.epsilon > this.epsilon_min) {
                this.epsilon *= this.epsilon_decay;
            }

            return action;
        } catch (error) {
            console.error('Error in getAction:', error);
            return 0;
        }
    }

    /**
     * Runs one training step on the first batchSize experiences.
     *
     * Critic target: r + gamma * V(s') * (1 - done).
     * Actor target: one-hot of the taken action (binary cross-entropy).
     * A true PPO update would weight the policy loss by the advantage
     * (returns - V(s)); the original's unused `advantages` computation has
     * been dropped.
     *
     * @param {Array<{state: number[], action: number, reward: number,
     *                nextState: number[], done: boolean}>} experiences
     * @returns {Promise<void>}
     */
    async train(experiences) {
        if (experiences.length < this.batchSize) {
            return;
        }

        const batch = experiences.slice(0, this.batchSize);

        try {
            const states = tf.tensor2d(batch.map(exp => exp.state));
            const actions = tf.tensor1d(batch.map(exp => exp.action), 'int32');
            const rewards = tf.tensor1d(batch.map(exp => exp.reward));
            const nextStates = tf.tensor2d(batch.map(exp => exp.nextState));
            const dones = tf.tensor1d(batch.map(exp => exp.done ? 1 : 0));

            // tf.tidy cannot wrap the async trainOnBatch calls, so compute
            // the targets inside a tidy (which disposes all intermediates)
            // and release the returned tensors in the finally below.
            const [returns, actionsMask] = tf.tidy(() => {
                // predict() yields shape [batch, 1]; squeeze to [batch] so
                // the arithmetic with `rewards` is elementwise rather than
                // broadcasting to [batch, batch].
                const nextValues = this.critic.predict(nextStates).squeeze();
                const notDone = tf.scalar(1).sub(dones);
                // BUG FIX: `this.gamma * tensor` (number * object) is NaN in
                // JS — the discount must be applied via tensor ops.
                const target = rewards
                    .add(nextValues.mul(notDone).mul(this.gamma))
                    .expandDims(1); // [batch, 1] to match the critic output
                return [target, tf.oneHot(actions, this.actionSize)];
            });

            try {
                await this.actor.trainOnBatch(states, actionsMask);
                await this.critic.trainOnBatch(states, returns);
            } finally {
                // Disposal in finally so nothing leaks when a train step
                // throws (the original leaked its targets on failure).
                returns.dispose();
                actionsMask.dispose();
                states.dispose();
                actions.dispose();
                rewards.dispose();
                nextStates.dispose();
                dones.dispose();
            }
        } catch (error) {
            console.error('Error in training:', error);
        }
    }
}