package com.yc.testupload.crawler.test;

import com.yc.testupload.crawler.strategy.StrategySelector;
import com.yc.testupload.crawler.test.Question;
import jakarta.servlet.ServletException;
import jakarta.servlet.annotation.WebServlet;
import jakarta.servlet.http.HttpServlet;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.List;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

/**
 * 爬虫应用主类
 */
@WebServlet(name = "grabMainServlet", value = "/grab")
@WebServlet(name = "grabMainServlet", value = "/grab")
public class GrabMain extends HttpServlet {

    /**
     * Regex (as a String, used with {@link String#matches}/{@link String#replaceFirst})
     * marking the start of a question line: "题目 1：", "题目1:", "题目2." etc.
     * Uses {@code \s*} rather than {@code \s+} so the common no-space form
     * ("题目1：") is recognized as well; the previous pattern silently skipped it.
     * Group 1 captures the question number.
     */
    private static final String QUESTION_HEADER_REGEX = "题目\\s*(\\d+)[：.:]";

    /** Regex marking the start of an answer section: "答案：", "答案:" or "答案.". */
    private static final String ANSWER_HEADER_REGEX = "答案[：.:].*";

    private NiukeSpider3 spider;
    private WordGenerator wordGenerator;
    private StrategySelector strategySelector;

    /**
     * Creates the crawler, Word generator and strategy selector once per servlet
     * instance. Servlet containers may serve concurrent requests with one instance;
     * thread-safety of these collaborators is assumed — TODO confirm.
     */
    @Override
    public void init() {
        spider = new NiukeSpider3();
        wordGenerator = new WordGenerator();
        strategySelector = new StrategySelector();
    }

    /**
     * Handles GET requests. Without a {@code url} parameter it renders the input
     * form; otherwise it crawls the interview questions behind the URL (optionally
     * sending the user-supplied {@code cookies}), writes them to a Word document,
     * and reports success or failure as an HTML page.
     *
     * @param request  carries the "url" and optional "cookies" parameters
     * @param response receives an HTML page (UTF-8)
     */
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        response.setContentType("text/html;charset=UTF-8");

        String url = request.getParameter("url");
        String cookies = request.getParameter("cookies");

        // No URL means there is nothing to crawl: show the form and stop.
        if (url == null || url.isEmpty()) {
            showInputForm(response);
            return;
        }

        PrintWriter out = response.getWriter();
        try {
            // Forward the raw cookie header to the spider so pages behind a
            // login can be fetched.
            if (cookies != null && !cookies.isEmpty()) {
                spider.setCookies(cookies);
                System.out.println("已设置用户提供的Cookie信息");
            } else {
                System.out.println("未提供Cookie信息，使用默认设置");
            }

            // Try the pluggable strategies first; fall back to the legacy
            // spider when they extract nothing.
            List<Question> questions = crawlQuestionsWithStrategy(url);
            if (questions.isEmpty()) {
                System.out.println("策略选择器提取失败，尝试使用原始爬虫方法");
                questions = spider.crawlQuestions(url);
            }

            // TODO(review): hard-coded Windows path — consider making this configurable.
            String outputPath = "E:\\develop\\Grab\\interview_questions.docx";
            wordGenerator.generateWordDocument(questions, outputPath);

            out.println("<html><body>");
            out.println("<h1>爬取和生成成功！</h1>");
            out.println("<p>已成功爬取 " + questions.size() + " 道面试题</p>");
            out.println("<p>Word文档已生成：" + escapeHtml(outputPath) + "</p>");
            out.println("<p><a href='grab'>返回重新爬取</a></p>");
            out.println("</body></html>");
        } catch (Exception e) {
            // Escape the message: it frequently echoes the attacker-controlled
            // "url" parameter and must not be injected into the HTML verbatim.
            out.println("<html><body>");
            out.println("<h1>操作失败！</h1>");
            out.println("<p>错误信息：" + escapeHtml(e.getMessage()) + "</p>");
            out.println("<p>请检查URL和Cookie是否有效，以及是否有访问权限</p>");
            out.println("<p><a href='grab'>返回重试</a></p>");
            out.println("</body></html>");
            e.printStackTrace();
        }
    }

    /** POST is handled identically to GET (form submission convenience). */
    @Override
    protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        doGet(request, response);
    }

    /**
     * Crawls interview questions from {@code url} using the strategy selector.
     * Pages that look JavaScript-heavy are tried with the Selenium strategy
     * first; otherwise (or on failure) a plain HTTP fetch plus the generic
     * strategy chain is used.
     *
     * @param url page to crawl
     * @return extracted questions; empty list when nothing could be extracted
     * @throws Exception any crawling error is propagated to the caller
     */
    private List<Question> crawlQuestionsWithStrategy(String url) throws Exception {
        List<Question> questions = new ArrayList<>();

        try {
            // JavaScript-rendered pages cannot be parsed from raw HTML, so
            // prefer the Selenium strategy when the URL looks dynamic.
            if (isComplexDynamicPage(url)) {
                System.out.println("检测到可能需要JavaScript渲染的页面，优先使用Selenium策略");

                for (com.yc.testupload.crawler.strategy.CrawlingStrategy strategy : strategySelector.getStrategies()) {
                    if (strategy instanceof com.yc.testupload.crawler.strategy.SeleniumCrawlingStrategy) {
                        com.yc.testupload.crawler.strategy.SeleniumCrawlingStrategy seleniumStrategy =
                            (com.yc.testupload.crawler.strategy.SeleniumCrawlingStrategy) strategy;

                        System.out.println("使用Selenium策略从URL抓取内容");
                        String extractedQuestionsText = seleniumStrategy.extractQuestionsFromUrl(url);

                        if (!extractedQuestionsText.isEmpty()) {
                            questions = convertTextToQuestions(extractedQuestionsText, url, "");
                            if (!questions.isEmpty()) {
                                System.out.println("Selenium策略成功提取 " + questions.size() + " 道面试题");
                                return questions;
                            }
                        }
                        // Only one Selenium strategy is expected in the chain.
                        break;
                    }
                }
            }

            // Fallback: plain HTTP fetch and generic extraction strategies.
            System.out.println("使用传统HTTP请求方法获取页面内容");
            // Trust-all SSL context — required by the spider for sites with
            // certificate issues; see NiukeSpider3.
            spider.initSSLContext();

            String htmlContent = spider.getHtmlContent(url);
            if (htmlContent == null || htmlContent.isEmpty()) {
                System.out.println("获取页面内容失败");
                return questions;
            }

            String extractedQuestionsText = strategySelector.extractQuestions(url, htmlContent);
            if (!extractedQuestionsText.isEmpty()) {
                questions = convertTextToQuestions(extractedQuestionsText, url, htmlContent);
            }

        } catch (Exception e) {
            System.out.println("使用策略选择器爬取时发生错误: " + e.getMessage());
            throw e;
        }

        return questions;
    }

    /**
     * Heuristically decides whether {@code url} likely points to a page that
     * needs JavaScript rendering (SPA, exam portals, AJAX-driven pages).
     *
     * @param url URL to classify (must be non-null)
     * @return true when the URL matches a known dynamic-page indicator, or is a
     *         long URL carrying query parameters
     */
    private boolean isComplexDynamicPage(String url) {
        String lowerUrl = url.toLowerCase();

        // Keywords that typically appear in JavaScript-heavy pages.
        String[] dynamicPageIndicators = {
            "exam", "test", "quiz", "interview",
            "dynamic", "spa", "react", "vue",
            "angular", "javascript", "ajax",
            "nowcoder.com/exam", "niuke.com/exam"
        };

        for (String indicator : dynamicPageIndicators) {
            if (lowerUrl.contains(indicator)) {
                return true;
            }
        }

        // Long URLs with query strings are often dynamically rendered too.
        return lowerUrl.contains("?") && lowerUrl.length() > 50;
    }

    /**
     * Parses the strategy output (plain text, one "题目 N：…" header per
     * question, optionally followed by a "答案：…" section) into
     * {@link Question} objects.
     *
     * @param extractedText text produced by a crawling strategy
     * @param url           source URL (currently unused here; kept for parity
     *                      with callers — TODO confirm whether needed)
     * @param htmlContent   raw HTML (currently unused here)
     * @return parsed questions; empty list when the text contains no headers
     */
    private List<Question> convertTextToQuestions(String extractedText, String url, String htmlContent) {
        List<Question> questions = new ArrayList<>();

        try {
            String[] lines = extractedText.split("\\n");
            Question currentQuestion = null;
            StringBuilder currentContent = new StringBuilder();
            StringBuilder currentAnswer = new StringBuilder();
            boolean isInAnswer = false;

            for (String line : lines) {
                line = line.trim();
                if (line.isEmpty()) continue;

                // A "题目 N：" header starts a new question.
                if (line.matches(QUESTION_HEADER_REGEX + ".*")) {
                    // Flush the previous question, if any.
                    if (currentQuestion != null) {
                        currentQuestion.setContent(currentContent.toString().trim());
                        currentQuestion.setAnswer(currentAnswer.toString().trim());
                        questions.add(currentQuestion);
                    }

                    currentQuestion = new Question();
                    currentContent.setLength(0);
                    currentAnswer.setLength(0);
                    isInAnswer = false;

                    // Capture ONLY the leading question number. The previous
                    // implementation stripped all non-digits from the whole
                    // line, so digits inside the title corrupted the number
                    // (e.g. "题目 1：数组长度为10" produced "110").
                    String number = line.replaceFirst("^" + QUESTION_HEADER_REGEX + ".*$", "$1");
                    if (number.isEmpty() || number.equals(line)) {
                        number = String.valueOf(questions.size() + 1);
                    }
                    currentQuestion.setNumber(number);

                    // Title = header line minus the "题目 N：" prefix.
                    String title = line.replaceFirst(QUESTION_HEADER_REGEX + "\\s*", "");
                    currentQuestion.setTitle(title);

                    // The header line is also part of the question content.
                    currentContent.append(line).append("\n");
                }
                // An "答案：" header switches accumulation to the answer buffer.
                else if (currentQuestion != null && line.matches(ANSWER_HEADER_REGEX)) {
                    isInAnswer = true;
                    currentAnswer.append(line).append("\n");
                }
                // Continuation lines go to whichever section is open.
                else if (currentQuestion != null) {
                    if (isInAnswer) {
                        currentAnswer.append(line).append("\n");
                    } else {
                        currentContent.append(line).append("\n");
                    }
                }
            }

            // Flush the final question.
            if (currentQuestion != null) {
                currentQuestion.setContent(currentContent.toString().trim());
                currentQuestion.setAnswer(currentAnswer.toString().trim());
                questions.add(currentQuestion);
            }

        } catch (Exception e) {
            System.out.println("将文本转换为Question对象时发生错误: " + e.getMessage());
        }

        return questions;
    }

    /**
     * Renders the HTML input form asking for the target URL and an optional
     * cookie string.
     *
     * @param response receives the form page
     * @throws IOException if the response writer fails
     */
    private void showInputForm(HttpServletResponse response) throws IOException {
        PrintWriter out = response.getWriter();
        out.println("<html><body>");
        out.println("<h1>牛客网面试题爬取工具</h1>");
        out.println("<form method='get' action='grab'>");
        out.println("<p>请输入牛客网面试题页面URL：</p>");
        out.println("<input type='text' name='url' size='100' value='https://www.nowcoder.com/exam/interview/92217697/test?paperId=62028486&order=0'>");
        out.println("<br><br>");
        out.println("<p>可选：输入Cookie信息（用于访问需要登录的页面）：</p>");
        out.println("<textarea name='cookies' rows='4' cols='100' placeholder='请输入从浏览器中获取的Cookie信息...'></textarea>");
        out.println("<br><br>");
        out.println("<p><strong>提示：</strong>如何获取Cookie？</p>");
        out.println("<ol>");
        out.println("<li>在浏览器中登录牛客网</li>");
        out.println("<li>打开开发者工具(F12)，切换到Network选项卡</li>");
        out.println("<li>刷新页面，选择任意请求，查看Request Headers中的Cookie字段</li>");
        out.println("<li>复制Cookie值粘贴到上方输入框</li>");
        out.println("</ol>");
        out.println("<br>");
        out.println("<input type='submit' value='开始爬取并生成Word文档'>");
        out.println("</form>");
        out.println("</body></html>");
    }

    /**
     * Minimal HTML escaping for text interpolated into response pages,
     * preventing reflected XSS via exception messages and similar values.
     *
     * @param s raw text, may be null
     * @return escaped text; empty string for null input
     */
    private static String escapeHtml(String s) {
        if (s == null) {
            return "";
        }
        return s.replace("&", "&amp;")
                .replace("<", "&lt;")
                .replace(">", "&gt;")
                .replace("\"", "&quot;")
                .replace("'", "&#39;");
    }

    /**
     * Standalone entry point for running the crawl without a servlet container.
     * Mirrors the doGet pipeline: strategy extraction first, legacy spider as
     * fallback, then Word generation.
     */
    public static void main(String[] args) {
        try {
            String url = "https://www.nowcoder.com/exam/interview/92217697/test?paperId=62028486&order=0";
            String outputPath = "E:\\develop\\Grab\\interview_questions.docx";

            NiukeSpider3 spider = new NiukeSpider3();
            WordGenerator wordGenerator = new WordGenerator();
            StrategySelector strategySelector = new StrategySelector();

            System.out.println("开始爬取面试题...");

            List<Question> questions = new ArrayList<>();
            try {
                // Trust-all SSL context, then fetch and extract.
                spider.initSSLContext();
                String htmlContent = spider.getHtmlContent(url);
                String extractedQuestionsText = strategySelector.extractQuestions(url, htmlContent);

                if (!extractedQuestionsText.isEmpty()) {
                    // Simple split on question headers; shares the same header
                    // pattern as convertTextToQuestions so both paths agree.
                    String[] questionBlocks = extractedQuestionsText.split(QUESTION_HEADER_REGEX);
                    for (int i = 1; i < questionBlocks.length; i++) {
                        Question q = new Question();
                        q.setNumber(String.valueOf(i));
                        q.setContent("题目内容：" + questionBlocks[i].trim());
                        questions.add(q);
                    }
                    System.out.println("策略选择器成功提取 " + questions.size() + " 道面试题");
                }
            } catch (Exception e) {
                System.out.println("策略选择器爬取失败: " + e.getMessage());
            }

            if (questions.isEmpty()) {
                System.out.println("尝试使用原始爬虫方法...");
                questions = spider.crawlQuestions(url);
            }

            System.out.println("总共爬取 " + questions.size() + " 道面试题");

            System.out.println("开始生成Word文档...");
            wordGenerator.generateWordDocument(questions, outputPath);
            System.out.println("Word文档已成功生成：" + outputPath);
        } catch (Exception e) {
            System.err.println("操作失败：" + e.getMessage());
            e.printStackTrace();
        }
    }
}