package com.zhava.crawler.adapter.web;

import com.zhava.crawler.client.api.CrawlerApi;
import com.zhava.crawler.client.request.CrawlerRequest;
import com.zhava.crawler.client.request.PaginatedCrawlerRequest;
import com.zhava.crawler.client.response.CrawlerResponse;
import com.zhava.crawler.client.response.PaginatedCrawlerResponse;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.responses.ApiResponses;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * Crawler controller.
 * <p>
 * Thin HTTP adapter that delegates crawl operations to {@link CrawlerApi} and
 * maps the response's embedded status code onto the HTTP status line.
 *
 * @author zhaxiang
 */
@RestController
@RequestMapping("/api/crawler")
@Tag(name = "爬虫接口", description = "提供爬取数据的HTTP接口")
public class CrawlerController {

    private final CrawlerApi crawlerApi;

    /**
     * Constructor injection keeps the dependency final and makes the class
     * testable without a Spring context (preferred over field injection).
     *
     * @param crawlerApi crawler service facade
     */
    public CrawlerController(CrawlerApi crawlerApi) {
        this.crawlerApi = crawlerApi;
    }

    /**
     * Crawls a single page.
     * <p>
     * Example request body:
     * {
     *   "sourceUrl": "https://example.com",
     *   "depth": 1,
     *   "extractLinks": true,
     *   "headers": {
     *     "Cookie": "sessionId=abc123"
     *   },
     *   "cssSelector": "div.content",
     *   "outputFormat": "JSON",
     *   "exportFile": 0
     * }
     *
     * @param request crawler request parameters
     * @return crawler response; HTTP status mirrors the response's status code
     */
    @PostMapping("/crawl")
    @Operation(
        summary = "爬取单个页面数据", 
        description = "根据提供的URL和参数爬取数据，不支持多级爬取",
        tags = {"爬虫接口"}
    )
    @ApiResponses(value = {
        @ApiResponse(
            responseCode = "200", 
            description = "爬取成功",
            content = @Content(mediaType = "application/json", schema = @Schema(implementation = CrawlerResponse.class))
        ),
        @ApiResponse(
            responseCode = "400", 
            description = "请求参数错误",
            content = @Content(mediaType = "application/json", schema = @Schema(implementation = CrawlerResponse.class))
        ),
        @ApiResponse(
            responseCode = "500", 
            description = "服务器内部错误",
            content = @Content(mediaType = "application/json", schema = @Schema(implementation = CrawlerResponse.class))
        )
    })
    public ResponseEntity<CrawlerResponse> crawl(
            @Parameter(description = "爬虫请求参数", required = true) 
            @RequestBody CrawlerRequest request) {
        CrawlerResponse response = crawlerApi.crawl(request);

        // Propagate the actual error status (400, 500, ...) instead of
        // collapsing every error into 400 — the declared 500 ApiResponse
        // was previously unreachable.
        if (response.getStatusCode() != null && response.getStatusCode() >= 400) {
            return ResponseEntity.status(response.getStatusCode()).body(response);
        }

        return ResponseEntity.ok(response);
    }
    
    /**
     * Crawls multiple pages by iterating a page-number query parameter.
     * <p>
     * Example request body:
     * {
     *   "sourceUrl": "https://www.amazon.com/-/zh/product-reviews/B077P5BTCY/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber=1",
     *   "pageParameterName": "pageNumber",
     *   "startPage": 1,
     *   "endPage": 3,
     *   "headers": {
     *     "Cookie": "sessionId=abc123"
     *   },
     *   "cssSelector": "div.review",
     *   "outputFormat": "JSON",
     *   "exportFile": 0
     * }
     *
     * @param request paginated crawler request parameters
     * @return paginated crawler response; HTTP status mirrors the response's status code
     */
    @PostMapping("/crawl-paginated")
    @Operation(
        summary = "爬取多个分页数据", 
        description = "根据提供的URL和页码参数爬取多个分页数据，支持自动变换页码参数",
        tags = {"爬虫接口"}
    )
    @ApiResponses(value = {
        @ApiResponse(
            responseCode = "200", 
            description = "爬取成功",
            content = @Content(mediaType = "application/json", schema = @Schema(implementation = PaginatedCrawlerResponse.class))
        ),
        @ApiResponse(
            responseCode = "400", 
            description = "请求参数错误",
            content = @Content(mediaType = "application/json", schema = @Schema(implementation = PaginatedCrawlerResponse.class))
        ),
        @ApiResponse(
            responseCode = "500", 
            description = "服务器内部错误",
            content = @Content(mediaType = "application/json", schema = @Schema(implementation = PaginatedCrawlerResponse.class))
        )
    })
    public ResponseEntity<PaginatedCrawlerResponse> crawlPaginated(
            @Parameter(description = "分页式爬虫请求参数", required = true) 
            @RequestBody PaginatedCrawlerRequest request) {
        PaginatedCrawlerResponse response = crawlerApi.crawlPaginated(request);

        // Same mapping as crawl(): surface the real error status so that
        // server-side failures are not misreported as client errors.
        if (response.getStatusCode() != null && response.getStatusCode() >= 400) {
            return ResponseEntity.status(response.getStatusCode()).body(response);
        }

        return ResponseEntity.ok(response);
    }
}