package com.glaive;

import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.file.FileReader;
import cn.hutool.core.util.StrUtil;
import io.quarkus.arc.impl.Sets;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class DuplicateFileHandleCmd {
    // Path of the HTML file to de-duplicate, resolved against the current working
    // directory. Built with Paths.get so the separator is platform-independent
    // (the previous hard-coded "\\" only worked on Windows).
    static String filePath = Paths.get(System.getProperty("user.dir"), "Regex.html").toString();

    public static void main(String[] args) throws Exception {
        readLine();
    }

    /**
     * Reads the HTML file and prints every distinct line, dropping duplicates.
     * This is the primary de-duplication routine.
     *
     * @throws IOException if the file cannot be opened or read
     */
    private static void readHtml() throws IOException {
        // try-with-resources: the stream returned by Files.lines holds an open
        // file handle that leaks unless it is closed explicitly.
        try (Stream<String> lines = Files.lines(Paths.get(filePath))) {
            lines.distinct().forEach(System.out::println);
        }
    }

    /**
     * Reads the HTML file, drops duplicate lines, and prints only the lines
     * containing the hard-coded search keyword. A filtering variant of
     * {@link #readHtml()}; less commonly used.
     *
     * @throws Exception if the file cannot be opened or read
     */
    public static void readLine() throws Exception {
        // Close the underlying file handle when the pipeline finishes.
        try (Stream<String> lines = Files.lines(Paths.get(filePath))) {
            lines.distinct()
                    .filter(s -> s.contains("刑事侦缉档案4粤语"))
                    .forEach(System.out::println);
        }
    }
}