package com.cccyy.data.list;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;

/**
 * Demonstrates two ways of removing duplicate elements from a {@code List}:
 * <ol>
 *   <li>Dumping the list into a {@code Set} — relies on the element type
 *       overriding {@code equals}/{@code hashCode}.</li>
 *   <li>Filtering a stream with a stateful "distinct by key" predicate —
 *       works even without {@code equals}/{@code hashCode}, as long as the
 *       extracted key identifies an element uniquely.</li>
 * </ol>
 */
public class RepeatWithEqual {
    public static void main(String[] args) {
        List<UserForListTest> lists = Arrays.asList(
                new UserForListTest(1, "Alice"),
                new UserForListTest(1, "Bob"), // intentional duplicate: same id as "Alice"
                new UserForListTest(2, "Charlie"),
                new UserForListTest(2, "Charlie")
        );

        // Approach 1: Set-based de-duplication. Requires UserForListTest to
        // override equals/hashCode (otherwise every instance is "distinct").
        // LinkedHashSet preserves insertion order, so the printed output is
        // deterministic; a plain HashSet would print in arbitrary order.
        Set<UserForListTest> distinctUserSet = new LinkedHashSet<>(lists);
        List<UserForListTest> distinctUsers = new ArrayList<>(distinctUserSet);
        distinctUsers.forEach(System.out::println);

        // Approach 2: de-duplicate by a single key (the id). This assumes the
        // id uniquely identifies a user — TODO confirm against UserForListTest.
        List<UserForListTest> distinctUsersById = lists.stream()
                .filter(distinctByKey(UserForListTest::getId))
                .collect(Collectors.toList());
        distinctUsersById.forEach(System.out::println);
    }

    /**
     * Returns a predicate that accepts an element only the first time its
     * extracted key is seen.
     *
     * <p>NOTE: the returned predicate is <em>stateful</em> (it accumulates
     * seen keys), so it must be used in a sequential stream and must not be
     * shared between pipelines. A concurrent set is used so that a parallel
     * stream at least stays thread-safe, but element order would then be
     * nondeterministic.
     *
     * @param keyExtractor function deriving the de-duplication key from an element
     * @param <T>          the element type
     * @param <K>          the key type
     * @return a predicate suitable for {@link java.util.stream.Stream#filter}
     */
    public static <T, K> Predicate<T> distinctByKey(Function<? super T, ? extends K> keyExtractor) {
        Set<K> seen = ConcurrentHashMap.newKeySet();
        return t -> seen.add(keyExtractor.apply(t));
    }

}
