# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Llm::OpenAi::Completions::ExplainVulnerability, feature_category: :vulnerability_management do
  subject(:explain) { described_class.new(prompt_class, { request_id: 'uuid' }) }

  let(:prompt_class) { Gitlab::Llm::Templates::ExplainVulnerability }

  let_it_be(:user) { create(:user) }
  let_it_be(:project) do
    create(
      :project,
      :custom_repo,
      files: { 'main.c' => "#include <stdio.h>\n\nint main() { printf(\"hello, world!\"); }" }
    )
  end

  let_it_be(:vulnerability) { create(:vulnerability, :with_finding, project: project) }

  before do
    # Record subscription triggers instead of executing them, so the
    # examples can assert on the published payload.
    allow(GraphqlTriggers).to receive(:ai_completion_response)

    # Point the finding at the file committed to the custom repo above.
    vulnerability.finding.location['file'] = 'main.c'
    vulnerability.finding.location['start_line'] = 1

    # Force the OpenAI code path rather than the Vertex one.
    stub_feature_flags(explain_vulnerability_vertex: false)
  end

  describe '#execute' do
    context 'when the chat client returns an unsuccessful response' do
      before do
        allow_next_instance_of(Gitlab::Llm::OpenAi::Client) do |client|
          allow(client).to receive(:chat).and_return({ 'error' => 'Ooops...' }.to_json)
        end
      end

      it 'publishes the error to the graphql subscription' do
        explain.execute(user, vulnerability, {})

        expect(GraphqlTriggers).to have_received(:ai_completion_response).with(
          user.to_global_id,
          vulnerability.to_global_id,
          hash_including({
            id: anything,
            model_name: vulnerability.class.name,
            response_body: '',
            request_id: 'uuid',
            errors: ['Ooops...']
          })
        )
      end
    end

    context 'when the chat client returns a successful response' do
      let(:example_answer) do
        <<-AI.strip
        As an AI language model, I cannot access or analyze specific files or code.
        However, based on the limited information provided, "Test vulnerability 1"
        could refer to a security flaw in a particular software or application.
        It is likely that the vulnerability could be exploited by an attacker to gain
        unauthorized access to sensitive data or execute malicious code.

        To fix this vulnerability, the code in question needs to be analyzed and modified
        to remove any security flaws. The "Test remediations" section of the README.md
        file should provide step-by-step instructions on how to fix the vulnerability,
        which may involve updating the software or application, applying security patches,
        or implementing additional security measures. It is important to follow these
        instructions carefully to ensure that the vulnerability is fully
        addressed and the system remains secure.
        AI
      end

      # Mirrors the shape of an OpenAI chat-completion API payload; only the
      # first choice's message content is asserted on below.
      let(:example_response) do
        {
          'id' => 'chatcmpl-74uDpPnYHVPwLg0RIM6recPqgZKm5',
          'object' => 'chat.completion',
          'created' => 1681403785,
          'model' => 'gpt-3.5-turbo-0301',
          'usage' => { 'prompt_tokens' => 59, 'completion_tokens' => 155, 'total_tokens' => 214 },
          'choices' => [
            {
              'message' => { 'role' => 'assistant', 'content' => example_answer },
              'finish_reason' => 'stop',
              'index' => 0
            }
          ]
        }
      end

      before do
        allow_next_instance_of(Gitlab::Llm::OpenAi::Client) do |client|
          allow(client).to receive(:chat).and_return(example_response.to_json)
        end
      end

      it 'publishes the content from the AI response' do
        explain.execute(user, vulnerability, {})

        expect(GraphqlTriggers).to have_received(:ai_completion_response).with(
          user.to_global_id,
          vulnerability.to_global_id,
          hash_including({
            id: anything,
            model_name: vulnerability.class.name,
            response_body: example_answer,
            request_id: 'uuid',
            errors: []
          })
        )
      end
    end
  end
end
