import * as tf from '@tensorflow/tfjs-node';
import { PerformanceMonitor } from './PerformanceMonitor';

// Backends tried in priority order by HardwareAccelerator.
// NOTE(review): 'tpu' is not a backend tfjs-node registers by default —
// presumably a custom backend is registered elsewhere; verify before relying on it.
type BackendType = 'cpu' | 'webgl' | 'wasm' | 'tpu';

/**
 * Selects the fastest available TensorFlow.js backend and exposes simple
 * performance probes so callers can decide when to downgrade.
 */
export class HardwareAccelerator {
  private currentBackend: BackendType = 'cpu';
  private perfMonitor: PerformanceMonitor;

  constructor(perfMonitor: PerformanceMonitor) {
    this.perfMonitor = perfMonitor;
  }

  /**
   * Probes backends and activates the best one.
   * `detectBestBackend` already activates the chosen backend via
   * `tf.setBackend`, so no second (previously un-awaited) call is needed here.
   */
  async init(): Promise<void> {
    await this.detectBestBackend();
  }

  /** Try each backend in descending performance order; keep the first that initializes. */
  private async detectBestBackend(): Promise<void> {
    const backends: BackendType[] = ['tpu', 'webgl', 'wasm', 'cpu'];

    for (const backend of backends) {
      try {
        // setBackend resolves false (rather than throwing) when the backend
        // fails to initialize — treat that the same as an unavailable backend.
        const ok = await tf.setBackend(backend);
        if (ok) {
          this.currentBackend = backend;
          return;
        }
        console.warn(`${backend} backend failed to initialize`);
      } catch (err: unknown) {
        // catch variables are `unknown` under strict mode; narrow before use.
        const message = err instanceof Error ? err.message : String(err);
        console.warn(`${backend} backend not available:`, message);
      }
    }
  }

  /**
   * Runs a 1000x1000 matmul benchmark on the active backend.
   * @returns elapsed wall-clock time in milliseconds.
   */
  async checkPerformance(): Promise<number> {
    const start = performance.now();
    const x = tf.randomNormal([1000, 1000]);
    const y = tf.randomNormal([1000, 1000]);
    const product = x.matMul(y);
    try {
      // data() forces execution; awaiting it makes the timing meaningful.
      await product.data();
    } finally {
      // Dispose all three tensors — the original leaked them, which would
      // exhaust backend memory if this benchmark is run repeatedly.
      tf.dispose([x, y, product]);
    }
    return performance.now() - start;
  }

  /**
   * True when host load suggests falling back to a cheaper backend.
   * NOTE(review): assumes metrics.cpu is utilization in [0,1] and
   * metrics.memory is the *free* fraction — confirm against PerformanceMonitor.
   */
  shouldDowngrade(): boolean {
    const metrics = this.perfMonitor.getMetrics();
    return metrics.cpu > 0.9 || metrics.memory < 0.1;
  }

  /** The backend currently selected (defaults to 'cpu' until init() runs). */
  getCurrentBackend(): BackendType {
    return this.currentBackend;
  }
}