import { BehaviorSubject } from 'rxjs';
import { terminalService, CommandResult } from './TerminalService';

/**
 * One row of `ollama list` output, as parsed by `OllamaService.listModels`.
 * All fields are kept as the raw strings printed by the CLI.
 */
export interface OllamaModel {
  /** Model name/tag (first column of `ollama list`). */
  name: string;
  /** Human-readable size column, e.g. "3.8 GB" — not a byte count. */
  size: string;
  /** "Modified" column — presumably a relative time string; taken verbatim from the CLI. */
  modified: string;
}

/**
 * Contract for a service that drives a local Ollama installation by shelling
 * out to the `ollama` CLI. All command methods resolve with the raw
 * `CommandResult` from the terminal layer rather than throwing on failure
 * (except `listModels`, which parses output and throws on error).
 */
export interface OllamaServiceInterface {
  /** List locally installed models; rejects if `ollama list` fails. */
  listModels(): Promise<OllamaModel[]>;
  /** Create a model from a Modelfile. */
  createModel(name: string, modelfile: string): Promise<CommandResult>;
  /** Run a one-shot prompt against a model. */
  runModel(name: string, prompt: string): Promise<CommandResult>;
  /** Remove a local model (`ollama rm`). */
  deleteModel(name: string): Promise<CommandResult>;
  /** Download a model from a registry (`ollama pull`). */
  pullModel(name: string): Promise<CommandResult>;
  /** Upload a model to a registry (`ollama push`). */
  pushModel(name: string): Promise<CommandResult>;
  /** Launch `ollama serve`. */
  startServer(): Promise<CommandResult>;
  /** Terminate the running server process. */
  stopServer(): Promise<CommandResult>;
  /** Emits the latest known server-up/down state (polled in the background). */
  serverStatus$: BehaviorSubject<boolean>;
}

class OllamaService implements OllamaServiceInterface {
  /** Latest known server state; seeded `false` until the first probe completes. */
  public serverStatus$ = new BehaviorSubject<boolean>(false);

  /** Handle for the polling interval; null while polling is stopped. */
  private checkServerInterval: ReturnType<typeof setInterval> | null = null;

  constructor() {
    // Start polling immediately so serverStatus$ reflects reality.
    this.startServerStatusCheck();
  }

  /**
   * Stop the background status polling. Call when the service is no longer
   * needed; otherwise the interval timer keeps the process alive / leaks.
   */
  public dispose(): void {
    if (this.checkServerInterval) {
      clearInterval(this.checkServerInterval);
      this.checkServerInterval = null;
    }
  }

  /**
   * Wrap a value in single quotes for interpolation into a POSIX shell
   * command, so quotes/metacharacters in user-supplied names and prompts
   * cannot break or inject into the command line.
   */
  private shellQuote(value: string): string {
    // close quote, emit an escaped literal quote, reopen: ' -> '\''
    return `'${value.replace(/'/g, `'\\''`)}'`;
  }

  /** Probe the local Ollama HTTP API; true iff curl exits with status 0. */
  private async checkServerStatus(): Promise<boolean> {
    try {
      const result = await terminalService.executeCommand('curl -s http://localhost:11434/api/tags');
      return result.exitCode === 0;
    } catch {
      return false;
    }
  }

  /** Begin the 5-second polling loop (idempotent) with an immediate first probe. */
  private startServerStatusCheck(): void {
    if (this.checkServerInterval) return; // already polling

    const poll = async (): Promise<void> => {
      this.serverStatus$.next(await this.checkServerStatus());
    };
    void poll(); // don't make subscribers wait a full interval for the first emission
    this.checkServerInterval = setInterval(() => void poll(), 5000);
  }

  /**
   * List locally installed models by parsing `ollama list`.
   * @returns one entry per model row.
   * @throws Error when the command reports an error or non-zero exit code.
   */
  async listModels(): Promise<OllamaModel[]> {
    const result = await terminalService.executeCommand('ollama list');
    if (result.error || result.exitCode !== 0) {
      throw new Error(result.error || 'Failed to list models');
    }

    // `ollama list` prints space-padded columns: NAME  ID  SIZE  MODIFIED.
    // Split on runs of 2+ whitespace so multi-word cells ("3.8 GB",
    // "2 days ago") stay intact — the previous single-whitespace split cut
    // "3.8 GB" in half and leaked "GB" into the modified field.
    return result.output.split('\n')
      .slice(1) // skip the header row
      .map(line => line.trim())
      .filter(line => line.length > 0)
      .map(line => {
        const [name = '', , size = '', modified = ''] = line.split(/\s{2,}/);
        return { name, size, modified };
      });
  }

  /**
   * Create a model from a Modelfile.
   * @param name - name for the new model.
   * @param modelfile - path to the Modelfile; `ollama create` takes it via
   *   the `-f` flag (the old bare-argument form is not valid CLI syntax).
   */
  async createModel(name: string, modelfile: string): Promise<CommandResult> {
    return terminalService.executeCommand(
      `ollama create ${this.shellQuote(name)} -f ${this.shellQuote(modelfile)}`
    );
  }

  /**
   * Run a one-shot prompt against a model.
   * @param name - model to run.
   * @param prompt - prompt text, shell-quoted before interpolation.
   * @param options - optional CLI flags (`--format json`, `--keepalive`,
   *   `--insecure`, `--nowordwrap`).
   */
  async runModel(name: string, prompt: string, options: {
    format?: 'json' | 'text';
    keepAlive?: string;
    insecure?: boolean;
    noWordWrap?: boolean;
  } = {}): Promise<CommandResult> {
    const parts = [`ollama run ${this.shellQuote(name)}`];

    if (options.format === 'json') {
      parts.push('--format json'); // 'text' is the CLI default, so no flag
    }
    if (options.keepAlive) {
      parts.push(`--keepalive ${this.shellQuote(options.keepAlive)}`);
    }
    if (options.insecure) {
      parts.push('--insecure');
    }
    if (options.noWordWrap) {
      parts.push('--nowordwrap');
    }

    parts.push(this.shellQuote(prompt)); // prompt goes last, quoted against injection
    return terminalService.executeCommand(parts.join(' '));
  }

  /** Remove a local model (`ollama rm`). */
  async deleteModel(name: string): Promise<CommandResult> {
    return terminalService.executeCommand(`ollama rm ${this.shellQuote(name)}`);
  }

  /** Download a model from a registry (`ollama pull`). */
  async pullModel(name: string, options: { insecure?: boolean } = {}): Promise<CommandResult> {
    const flags = options.insecure ? ' --insecure' : '';
    return terminalService.executeCommand(`ollama pull ${this.shellQuote(name)}${flags}`);
  }

  /** Upload a model to a registry (`ollama push`). */
  async pushModel(name: string, options: { insecure?: boolean } = {}): Promise<CommandResult> {
    const flags = options.insecure ? ' --insecure' : '';
    return terminalService.executeCommand(`ollama push ${this.shellQuote(name)}${flags}`);
  }

  /**
   * Launch `ollama serve`, translating options into OLLAMA_* environment
   * variables exported ahead of the command.
   * NOTE(review): `serve` is long-running, so this promise presumably does
   * not settle until the server exits — confirm against TerminalService.
   */
  async startServer(options: {
    debug?: boolean;
    maxModelsPerGpu?: number;
    maxQueueRequests?: number;
    parallelRequests?: number;
    scheduledSpread?: boolean;
    flashAttention?: boolean;
  } = {}): Promise<CommandResult> {
    // Only include variables the caller asked for; falsy values are omitted
    // (note a numeric option of 0 is therefore treated as "unset").
    const env: Record<string, string> = {
      ...(options.debug && { OLLAMA_DEBUG: '1' }),
      ...(options.maxModelsPerGpu && { OLLAMA_MAX_LOADED_MODELS: options.maxModelsPerGpu.toString() }),
      ...(options.maxQueueRequests && { OLLAMA_MAX_QUEUE: options.maxQueueRequests.toString() }),
      ...(options.parallelRequests && { OLLAMA_NUM_PARALLEL: options.parallelRequests.toString() }),
      ...(options.scheduledSpread && { OLLAMA_SCHED_SPREAD: '1' }),
      ...(options.flashAttention && { OLLAMA_FLASH_ATTENTION: '1' })
    };

    // Chain `export K=V` statements ahead of the serve command.
    const envString = Object.entries(env)
      .map(([key, value]) => `export ${key}=${value}`)
      .join(' && ');

    const command = envString ? `${envString} && ollama serve` : 'ollama serve';
    return terminalService.executeCommand(command);
  }

  /**
   * Stop the server by killing the ollama process.
   * POSIX-only (`pkill`); adjust for other platforms if needed.
   */
  async stopServer(): Promise<CommandResult> {
    const result = await terminalService.executeCommand('pkill ollama');
    if (result.exitCode === 0) {
      // Reflect the stop right away instead of waiting for the next poll.
      this.serverStatus$.next(false);
    }
    return result;
  }
}

// Shared singleton. Note: constructing it has a side effect — it starts the
// 5-second server-status polling loop, so importing this module begins polling.
export const ollamaService = new OllamaService(); 