package com.web.crawler;

import com.google.common.base.Optional;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary;
import com.hankcs.hanlp.seg.common.Term;
import com.optimaize.langdetect.LanguageDetector;
import com.optimaize.langdetect.LanguageDetectorBuilder;
import com.optimaize.langdetect.i18n.LdLocale;
import com.optimaize.langdetect.ngram.NgramExtractors;
import com.optimaize.langdetect.profiles.LanguageProfile;
import com.optimaize.langdetect.profiles.LanguageProfileReader;
import com.optimaize.langdetect.text.CommonTextObjectFactories;
import com.optimaize.langdetect.text.TextObject;
import com.optimaize.langdetect.text.TextObjectFactory;
import org.apache.http.HttpEntity;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;

/**
 * Single-threaded BFS web crawler that collects a fixed quota of English and
 * Chinese news pages, normalizes their text (stop-word removal, Porter
 * stemming for English, HanLP segmentation for Chinese), and writes each
 * document to {@code documents/<lang>/News_<n>_<lang>.txt}.
 *
 * <p>Not thread-safe: all state is held in mutable static fields and the
 * crawl loop runs on the main thread only.
 */
public class WebCrawler {

    // Target number of English documents to download.
    private static final int MAX_EN = 800;
    // Target number of Chinese documents to download.
    private static final int MAX_ZH = 800;
    // Number of English documents downloaded so far.
    private static int enCount = 0;
    // Number of Chinese documents downloaded so far.
    private static int zhCount = 0;

    // BFS frontier of URLs still to visit, each tagged with the language
    // inherited from its seed URL.
    private static final Queue<MyUrl> urlQueue = new LinkedList<>();
    // URLs already visited, to avoid fetching the same page twice.
    private static final Set<String> visitedUrls = new HashSet<>();
    private static CloseableHttpClient httpClient;
    // Custom English stop-word set (currently empty; add entries as needed).
    private static final Set<String> STOP_WORDS = new HashSet<>(Arrays.asList(
    ));

    public static void main(String[] args) throws Exception {
        // HTTP client with 10-second connect/read timeouts and a polite UA.
        RequestConfig config = RequestConfig.custom()
                .setConnectTimeout(10000)
                .setSocketTimeout(10000)
                .build();
        httpClient = HttpClients.custom()
                .setDefaultRequestConfig(config)
                .setUserAgent("Mozilla/5.0 (compatible; AcademicCrawler/1.0)")
                .build();
        try {
            // 1. Load the en / zh-CN language profiles bundled inside the
            //    language-detector jar (no machine-specific filesystem path).
            List<LanguageProfile> profiles = new LanguageProfileReader().readBuiltIn(
                    Arrays.asList(LdLocale.fromString("en"), LdLocale.fromString("zh-CN")));
            // 2. Build the detector.
            LanguageDetector detector = LanguageDetectorBuilder.create(NgramExtractors.standard())
                    .withProfiles(profiles)
                    .build();
            // 3. Text factory tuned for large texts.
            TextObjectFactory textObjectFactory = CommonTextObjectFactories.forDetectingOnLargeText();

            // Seed URLs, one per language.
            urlQueue.add(new MyUrl("https://www.globaltimes.cn", "en")); // English
            urlQueue.add(new MyUrl("https://www.sina.com.cn", "zh"));    // Chinese

            // Output directories (no-op if they already exist).
            new File("documents/en").mkdirs();
            new File("documents/zh").mkdirs();

            // Crawl until both quotas are met or the frontier runs dry.
            while ((enCount < MAX_EN || zhCount < MAX_ZH) && !urlQueue.isEmpty()) {
                MyUrl myUrl = urlQueue.poll();
                String url = myUrl.url;
                if (visitedUrls.contains(url)) continue;
                // Skip URLs whose language quota is already filled.
                if (myUrl.lang.equals("en") && enCount >= MAX_EN) continue;
                if (myUrl.lang.equals("zh") && zhCount >= MAX_ZH) continue;
                visitedUrls.add(url);

                try {
                    System.out.println("Processing: " + url);
                    String html = fetchHtml(url);
                    if (html == null) continue;

                    String text = parseText(html);
                    if (text.length() < 100) continue; // too little content to be useful

                    // 4. Detect the language. Logged for diagnostics only: the
                    //    language tag inherited from the seed URL is authoritative.
                    TextObject textObject = textObjectFactory.forText(text);
                    Optional<LdLocale> result = detector.detect(textObject);
                    if (result.isPresent()) {
                        System.out.println("Detected language: " + result.get().getLanguage());
                    } else {
                        System.out.println("No language detected.");
                    }
                    String lang = myUrl.lang;

                    // Normalize and save the document under its language bucket.
                    if ("en".equals(lang) && enCount < MAX_EN) {
                        text = porterStemming(removeStopWords(text));
                        saveDocument(text, "en", url);
                        enCount++;
                        System.out.println("英文进度: " + enCount + "/" + MAX_EN);
                    } else if ("zh".equals(lang) && zhCount < MAX_ZH) {
                        text = segmentToSpaceSeparated(text);
                        text = removeStopWords(text);
                        saveDocument(text, "zh", url);
                        zhCount++;
                        System.out.println("中文进度: " + zhCount + "/" + MAX_ZH);
                    } else {
                        continue; // quota for this language already met
                    }

                    // Enqueue newly discovered links with the inherited language tag.
                    extractLinks(html, url).forEach(newUrl -> {
                        if (!visitedUrls.contains(newUrl)) {
                            urlQueue.add(new MyUrl(newUrl, myUrl.lang));
                        }
                    });
                    // Politeness delay: random 1–2 s between requests.
                    sleep();
                } catch (Exception e) {
                    System.err.println("Error processing " + url + ": " + e.getMessage());
                }
            }
        } finally {
            // Release the connection pool even if the crawl loop failed.
            httpClient.close();
        }
        System.out.println("爬取完成！英文文档：" + enCount + "，中文文档：" + zhCount);
    }

    /**
     * Fetches a URL and returns its HTML body, or {@code null} when the
     * response is not a 200 {@code text/html} page.
     *
     * @throws IOException on network failure
     */
    private static String fetchHtml(String url) throws IOException {
        HttpGet request = new HttpGet(url);
        try (CloseableHttpResponse response = httpClient.execute(request)) {
            if (response.getStatusLine().getStatusCode() != 200) return null;

            HttpEntity entity = response.getEntity();
            ContentType contentType = ContentType.getOrDefault(entity);
            if (!contentType.getMimeType().startsWith("text/html")) {
                return null;
            }
            // Honor the charset declared in the Content-Type header; many
            // Chinese sites are not UTF-8. Fall back to UTF-8 when absent.
            Charset charset = contentType.getCharset() != null
                    ? contentType.getCharset()
                    : StandardCharsets.UTF_8;
            return EntityUtils.toString(entity, charset);
        }
    }

    /** Sleeps a random 1–2 seconds to be polite to the crawled servers. */
    private static void sleep() {
        // ThreadLocalRandom avoids allocating a new Random on every call.
        int sleepTime = 1000 + ThreadLocalRandom.current().nextInt(1001);
        try {
            Thread.sleep(sleepTime);
        } catch (InterruptedException e) {
            // Restore the interrupt status before propagating.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }

    /**
     * Strips boilerplate (scripts, navigation, ads, …) from raw HTML and
     * returns the visible text of the core content elements.
     */
    private static String parseText(String html) {
        Document doc = Jsoup.parse(html);
        // Remove all non-content tags and common boilerplate components.
        doc.select
                ("script, style, noscript, nav, footer, header, aside, iframe, " +
                        "form, button, .sidebar, .ad, .banner, .menu, .navbar, .cookie-consent")
                .remove();

        // Extract only the tags that usually carry the article body.
        Elements contentElements = doc.select("p, h1, h2, h3, h4, h5, h6, article, section, main");
        return contentElements.text();
    }

    /**
     * Writes the processed text to
     * {@code documents/<lang>/News_<count+1>_<lang>.txt}, numbering files by
     * the current per-language counter (incremented by the caller afterwards).
     */
    private static void saveDocument(String text, String lang, String url) throws IOException {
        int count = lang.equals("zh") ? zhCount : enCount;
        String filename = "documents/" + lang + "/" + "News_" + (count + 1) + "_" + lang + ".txt";
        Files.write(Paths.get(filename), text.getBytes(StandardCharsets.UTF_8));
    }

    /**
     * Extracts absolute HTTP(S) links from the page, dropping fragments and
     * query strings so the visited-set dedupes canonical URLs.
     */
    private static Set<String> extractLinks(String html, String baseUrl) {
        Set<String> links = new HashSet<>();
        Document doc = Jsoup.parse(html, baseUrl);
        for (Element link : doc.select("a[href]")) {
            String href = link.attr("abs:href");
            if (href.startsWith("http") && !href.contains("#")) {
                links.add(href.split("\\?")[0]); // strip query parameters
            }
        }
        return links;
    }

    /** Segments Chinese text with HanLP and joins the tokens with spaces. */
    private static String segmentToSpaceSeparated(String text) {
        return HanLP.segment(text).stream().map(term -> term.word).collect(Collectors.joining(" "));
    }

    /**
     * Tokenizes the text with HanLP and removes both the custom
     * {@link #STOP_WORDS} and HanLP's core stop-word dictionary, returning
     * the remaining tokens joined by single spaces.
     */
    private static String removeStopWords(String text) {
        // 1. Tokenize.
        List<Term> termList = HanLP.segment(text);

        // 2. Drop stop words (custom list + HanLP core dictionary).
        List<Term> filteredTerms = termList.stream()
                .filter(term ->
                        !STOP_WORDS.contains(term.word.toLowerCase()) &&
                        !CoreStopWordDictionary.contains(term.word)
                )
                .collect(Collectors.toList());

        // 3. Join and collapse whitespace.
        return filteredTerms.stream()
                .map(term -> term.word)
                .collect(Collectors.joining(" "))
                .trim()
                .replaceAll("\\s+", " ");
    }

    /**
     * Applies English normalization: standard tokenization, lower-casing and
     * Porter stemming, returning the space-joined stemmed tokens.
     */
    private static String porterStemming(String text) {
        StringBuilder res = new StringBuilder();
        // Custom analyzer: tokenize + lowercase + Porter stemming. Both the
        // analyzer and its token stream are AutoCloseable, so close them via
        // try-with-resources even on failure.
        try (Analyzer analyzer = new Analyzer() {
            @Override
            protected TokenStreamComponents createComponents(String fieldName) {
                Tokenizer tokenizer = new StandardTokenizer();
                TokenStream stream = new LowerCaseFilter(tokenizer); // lowercase
                stream = new PorterStemFilter(stream);               // stemming
                return new TokenStreamComponents(tokenizer, stream);
            }
        }) {
            try (TokenStream stream = analyzer.tokenStream("field", text)) {
                CharTermAttribute termAttr = stream.addAttribute(CharTermAttribute.class);
                stream.reset();
                while (stream.incrementToken()) {
                    res.append(termAttr.toString()).append(" ");
                }
                stream.end();
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return res.toString().trim();
    }
}