package com.sg.java.util;

import cn.hutool.core.util.ObjectUtil;
import java.io.*;
import lombok.SneakyThrows;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.apache.commons.lang3.tuple.Triple;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;


/**
 * Utilities for loading CSV files (from the local filesystem or HDFS) into in-memory maps.
 *
 * <p>Each {@code *ToObject}/{@code *ToPb*} method locates a file by name prefix, parses it with
 * commons-csv, and packs selected columns of each record into a single string joined with
 * {@link #FIELD_SEPARATOR}. Parse failures are logged and swallowed: the methods return whatever
 * was read before the failure (a deliberate best-effort contract preserved from the original code).
 *
 * <p>NOTE(review): local-file readers use {@link FileReader}, i.e. the platform default charset —
 * presumably the input files match it; confirm before running on a differently-configured JVM.
 */
public class CsvUtils {

    /** Separator used to pack several CSV columns into a single map value. */
    private static final String FIELD_SEPARATOR = "!@@";

    private CsvUtils() {
        // static utility class — not meant to be instantiated
    }

    /**
     * Joins the given columns of a record with {@link #FIELD_SEPARATOR}.
     *
     * @param record  the CSV record to read from
     * @param indices column indices, in output order
     * @return the joined value string
     */
    private static String joinFields(CSVRecord record, int... indices) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < indices.length; i++) {
            if (i > 0) {
                sb.append(FIELD_SEPARATOR);
            }
            sb.append(record.get(indices[i]));
        }
        return sb.toString();
    }

    /**
     * Loads meter records from a local CSV file found in {@code filePath} by name prefix.
     * Column 2 (the meter id) becomes the key; columns 0,1,3..9 joined with
     * {@link #FIELD_SEPARATOR} become the value.
     *
     * @param filePath  directory containing the file (expected to end with a path separator)
     * @param headers   CSV header names handed to the parser
     * @param month     unused; kept for signature compatibility with existing callers
     * @param fileName  file-name prefix to look up inside {@code filePath}
     * @param METER_IDS optional filter: when non-null, only rows whose meter id is a key are kept
     * @return meter id -> joined columns; possibly partial if parsing fails mid-file
     * @throws Exception when no matching file is found
     */
    public static ConcurrentHashMap<String, String> CsvToObject(String filePath, String[] headers, String month, String fileName, Map<String, Void> METER_IDS) throws Exception {
        String file = findFile(filePath, fileName);
        ConcurrentHashMap<String, String> hashMap = new ConcurrentHashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(headers);
        // try-with-resources replaces the old explicit finally, which could itself NPE when
        // the FileReader failed to open (parser was still null at that point).
        try (FileReader fileReader = new FileReader(file);
             CSVParser parser = new CSVParser(fileReader, format)) {
            for (CSVRecord record : parser) {
                if (ObjectUtil.isNull(record) || (METER_IDS != null && !METER_IDS.containsKey(record.get(2)))) {
                    continue;
                }
                hashMap.put(record.get(2), joinFields(record, 0, 1, 3, 4, 5, 6, 7, 8, 9));
            }
        }
        catch (Exception e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }

    /**
     * Same as {@link #CsvToObject} but packs only columns 0,1,3..6 into the value.
     *
     * @see #CsvToObject(String, String[], String, String, Map)
     */
    public static ConcurrentHashMap<String, String> CsvToObject2(String filePath, String[] headers, String month, String fileName, Map<String, Void> METER_IDS) throws Exception {
        String file = findFile(filePath, fileName);
        ConcurrentHashMap<String, String> hashMap = new ConcurrentHashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(headers);
        try (FileReader fileReader = new FileReader(file);
             CSVParser parser = new CSVParser(fileReader, format)) {
            for (CSVRecord record : parser) {
                if (ObjectUtil.isNull(record) || (METER_IDS != null && !METER_IDS.containsKey(record.get(2)))) {
                    continue;
                }
                hashMap.put(record.get(2), joinFields(record, 0, 1, 3, 4, 5, 6));
            }
        }
        catch (Exception e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }

    /**
     * HDFS variant of {@link #CsvToObject}: reads the matching file through
     * {@code HdfsUtils.fs} and packs columns 0,1,3..5 into the value.
     *
     * @throws Exception when no matching HDFS file is found or it cannot be opened
     */
    public static ConcurrentHashMap<String, String> sparkCsvToObject(String filePath, String[] headers, String month, String fileName, Map<String, Void> METER_IDS) throws Exception {
        final FileStatus fileStatus = sparkFindFile(filePath, fileName);
        final FSDataInputStream in = HdfsUtils.fs.open(fileStatus);
        ConcurrentHashMap<String, String> hashMap = new ConcurrentHashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(headers);
        // closing the reader also closes the underlying HDFS input stream
        try (Reader reader = new InputStreamReader(in);
             CSVParser parser = new CSVParser(reader, format)) {
            for (CSVRecord record : parser) {
                if (ObjectUtil.isNull(record) || (METER_IDS != null && !METER_IDS.containsKey(record.get(2)))) {
                    continue;
                }
                hashMap.put(record.get(2), joinFields(record, 0, 1, 3, 4, 5));
            }
        }
        catch (Exception e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }

    /**
     * Reads a local two-column lookup CSV: column 0 -> column 1.
     *
     * @param filePath directory containing the file (expected to end with a path separator)
     * @param headers  CSV header names handed to the parser
     * @param fileName file-name prefix to look up inside {@code filePath}
     * @return column-0 -> column-1 map; possibly partial if parsing fails mid-file
     * @throws Exception when no matching file is found
     */
    public static ConcurrentHashMap<String, String> CsvToPb(String filePath, String[] headers, String fileName) throws Exception {
        String file = findFile(filePath, fileName);
        ConcurrentHashMap<String, String> hashMap = new ConcurrentHashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(headers);
        try (FileReader fileReader = new FileReader(file);
             CSVParser parser = new CSVParser(fileReader, format)) {
            for (CSVRecord record : parser) {
                if (ObjectUtil.isNull(record)) {
                    continue;
                }
                hashMap.put(record.get(0), record.get(1));
            }
        }
        catch (IOException e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }

    /**
     * HDFS variant of {@link #CsvToPb}: column 0 -> column 1 from a file on HDFS.
     *
     * @throws Exception when no matching HDFS file is found or it cannot be opened
     */
    public static ConcurrentHashMap<String, String> sparkCsvToPb(String filePath, String[] headers, String fileName) throws Exception {
        final FileStatus fileStatus = sparkFindFile(filePath, fileName);
        final FSDataInputStream in = HdfsUtils.fs.open(fileStatus);
        ConcurrentHashMap<String, String> hashMap = new ConcurrentHashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(headers);
        try (Reader reader = new InputStreamReader(in);
             CSVParser parser = new CSVParser(reader, format)) {
            for (CSVRecord record : parser) {
                if (ObjectUtil.isNull(record)) {
                    continue;
                }
                hashMap.put(record.get(0), record.get(1));
            }
        }
        catch (IOException e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }

    /**
     * Reads a local transformer CSV: column 0 -> columns 1..6 joined with
     * {@link #FIELD_SEPARATOR}. A missing column 6 is substituted with {@code "0"}.
     *
     * @param month unused; kept for signature compatibility with existing callers
     * @throws Exception when no matching file is found
     */
    public static ConcurrentHashMap<String, String> CsvToPbCd(String filePath, String[] headers, String fileName, String month) throws Exception {
        String file = findFile(filePath, fileName);
        ConcurrentHashMap<String, String> hashMap = new ConcurrentHashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(headers);
        try (FileReader fileReader = new FileReader(file);
             CSVParser parser = new CSVParser(fileReader, format)) {
            for (CSVRecord record : parser) {
                if (ObjectUtil.isNull(record)) {
                    continue;
                }
                // column 6 may be absent -> default "0"
                String str = joinFields(record, 1, 2, 3, 4, 5) + FIELD_SEPARATOR
                        + Optional.ofNullable(record.get(6)).orElse("0");
                hashMap.put(record.get(0), str);
            }
        }
        catch (IOException e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }

    /**
     * HDFS variant of {@link #CsvToPbCd}: column 0 -> columns 1..6 joined with
     * {@link #FIELD_SEPARATOR} (no default substitution for column 6 here).
     *
     * @param month unused; kept for signature compatibility with existing callers
     * @throws Exception when no matching HDFS file is found or it cannot be opened
     */
    public static ConcurrentHashMap<String, String> sparkCsvToPbCd(String filePath, String[] headers, String fileName, String month) throws Exception {
        final FileStatus fileStatus = sparkFindFile(filePath, fileName);
        final FSDataInputStream in = HdfsUtils.fs.open(fileStatus);
        ConcurrentHashMap<String, String> hashMap = new ConcurrentHashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(headers);
        try (Reader reader = new InputStreamReader(in);
             CSVParser parser = new CSVParser(reader, format)) {
            for (CSVRecord record : parser) {
                if (ObjectUtil.isNull(record)) {
                    continue;
                }
                hashMap.put(record.get(0), joinFields(record, 1, 2, 3, 4, 5, 6));
            }
        }
        catch (IOException e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }

    /**
     * Finds the first file in {@code filePath} whose name starts with {@code zpbyhName}.
     *
     * @param filePath  directory to search (expected to end with a path separator — the match
     *                  is returned as plain concatenation {@code filePath + name})
     * @param zpbyhName file-name prefix to match
     * @return the concatenated path of the first match
     * @throws Exception when the directory cannot be listed or no file matches
     */
    public static String findFile(String filePath, String zpbyhName) throws Exception {
        File file = new File(filePath);
        String[] names = file.list();
        // list() returns null when filePath does not exist or is not a directory;
        // previously this fell through to an NPE in the for-loop.
        if (names == null) {
            throw new Exception("文件名有误");
        }
        for (String name : names) {
            if (name.startsWith(zpbyhName)) {
                return filePath + name;
            }
        }
        throw new Exception("文件名有误");
    }

    /**
     * Finds the first HDFS entry under {@code filePath} whose name contains {@code zpbyhName}.
     *
     * @param filePath  HDFS directory to list
     * @param zpbyhName substring to match against entry names (the original also tested
     *                  {@code startsWith}, which is subsumed by {@code contains})
     * @return the matching {@link FileStatus}
     * @throws Exception when no entry matches
     */
    public static FileStatus sparkFindFile(String filePath, String zpbyhName) throws Exception {
        final FileStatus[] fileStatuses = HdfsUtils.fs.listStatus(new Path(filePath));
        for (FileStatus fileStatus : fileStatuses) {
            if (fileStatus.getPath().getName().contains(zpbyhName)) {
                return fileStatus;
            }
        }
        throw new Exception("异常");
    }

    /**
     * Reads a local CSV mapping column 1 -> column 5 (rated capacity lookup).
     *
     * @param pbFilePath directory containing the file (expected to end with a path separator)
     * @param pbHeaders  CSV header names handed to the parser
     * @param zFileName  file-name prefix to look up inside {@code pbFilePath}
     * @return column-1 -> column-5 map; possibly partial if parsing fails mid-file
     * @throws Exception when no matching file is found
     */
    public static HashMap<String, String> ratedCapacity(String pbFilePath, String[] pbHeaders, String zFileName) throws Exception {
        String file = findFile(pbFilePath, zFileName);
        HashMap<String, String> hashMap = new HashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(pbHeaders);
        try (FileReader fileReader = new FileReader(file);
             CSVParser parser = new CSVParser(fileReader, format)) {
            for (CSVRecord record : parser) {
                hashMap.put(record.get(1), record.get(5));
            }
        }
        catch (IOException e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }

    /**
     * Reads a local CSV mapping a measurement-point id (column 0) to a triple of
     * columns 1..3; rows with an empty column 3 are skipped.
     *
     * @return cdId -> (col1, col2, col3); possibly partial if parsing fails mid-file
     */
    //k - cdId  v - 配变id
    @SneakyThrows
    public static HashMap<String, Triple<String, String, String>> CsvToPbCd2(String filePath, String[] headers, String fileName) {
        String file = findFile(filePath, fileName);
        HashMap<String, Triple<String, String, String>> hashMap = new HashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(headers);
        try (FileReader fileReader = new FileReader(file);
             CSVParser parser = new CSVParser(fileReader, format)) {
            for (CSVRecord record : parser) {
                // rows without a value in column 3 carry no useful mapping
                if ("".equals(record.get(3))) {
                    continue;
                }
                hashMap.put(record.get(0), Triple.of(record.get(1), record.get(2), record.get(3)));
            }
        }
        catch (IOException e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }

    /**
     * Reads a local CSV mapping column 0 to the integer value of column 1.
     *
     * @param path directory containing the file (expected to end with a path separator)
     * @param yhdy file-name prefix to look up inside {@code path}
     * @return column-0 -> parsed column-1 count map
     * @throws Exception when no matching file is found; a non-numeric column 1 propagates
     *                   as {@link NumberFormatException} (unchanged from the original)
     */
    public static HashMap<String, Integer> CsvToDyCount(String path, String[] headers, String yhdy) throws Exception {
        String file = findFile(path, yhdy);
        HashMap<String, Integer> hashMap = new HashMap<>();
        CSVFormat format = CSVFormat.DEFAULT.withHeader(headers);
        try (FileReader fileReader = new FileReader(file);
             CSVParser parser = new CSVParser(fileReader, format)) {
            for (CSVRecord record : parser) {
                if (ObjectUtil.isNull(record)) {
                    continue;
                }
                hashMap.put(record.get(0), Integer.parseInt(record.get(1)));
            }
        }
        catch (IOException e) {
            // best-effort: log and return what was parsed so far (original behavior)
            e.printStackTrace();
        }
        return hashMap;
    }
}
