// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

import org.apache.doris.regression.util.Hdfs

suite("test_csv_with_header", "p0,external") {
    // Create a unique temp dir under java.io.tmpdir for files downloaded
    // during this run; it is removed again at the end of the suite.
    def localDataDir = new File(new File(System.getProperty("java.io.tmpdir")), "doris-case"+UUID.randomUUID().toString().replaceAll("-", ""))
    if (!localDataDir.mkdir()) {
        throw new IOException("create dir failed: ${localDataDir}")
    }

    // CSV header variants under test: plain csv, csv with a name header line,
    // and csv with name + type header lines.
    def format_csv = "csv"
    def format_csv_with_names = "csv_with_names"
    def format_csv_with_names_and_types = "csv_with_names_and_types"
    // Data file shipped with the suite for each format.
    def format_csv_file = "csv.txt"
    def format_csv_with_names_file =  "csv_with_names.txt"
    def format_csv_with_names_and_types_file = "csv_with_names_and_types.txt"
    def expect_rows = 10   // data rows contained in each data file
    def testTable = "test_csv_with_header"

    // Stream-load a local data file into `testTablex` and verify the load result.
    // Params: testTablex - target table; label - unique load label; format - one of
    // csv / csv_with_names / csv_with_names_and_types; file_path - suite-local data
    // file; expectRows - expected NumberTotalRows reported by the load.
    def test_stream_load = {testTablex, label, format, file_path, expectRows->
        streamLoad {
            table testTablex
            set 'label',label
            set 'column_separator',','
            set 'format',format
            file file_path
            time 10000
            check { result, exception, startTime, endTime ->
                if (exception != null) {
                    throw exception
                }
                log.info("Stream load result: ${result}".toString())
                def json = parseJson(result)
                assertEquals("success", json.Status.toLowerCase())
                // expected value first, actual second (JUnit convention); the
                // original had the arguments swapped, producing misleading
                // failure messages.
                assertEquals(expectRows, json.NumberTotalRows)
                assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
            }
        }
    }
    // Submit a broker load that reads `hdfsFilePath` from HDFS into
    // `testTable1` using the given format. Only submits the job; completion is
    // polled separately via check_import_result.
    // NOTE(review): brokerName is accepted but unused — the statement loads
    // "with HDFS" directly; confirm whether the parameter can be dropped.
    def import_from_hdfs = {testTable1, label1, hdfsFilePath, format1, brokerName, hdfsUser, hdfsPasswd->
        sql """
            LOAD LABEL ${label1} (
                DATA INFILE("${hdfsFilePath}") 
                INTO TABLE ${testTable1} COLUMNS TERMINATED BY "," 
                FORMAT as "${format1}" )
                with HDFS 
                ("username"="${hdfsUser}", "password"="${hdfsPasswd}", "fs.defaultFS"="${context.config.otherConfigs.get('hdfsFs')}")
        """
    }

    // Poll `show load` until the job with `checklabel` reaches FINISHED, then
    // verify the row count of `testTable4`. Fails fast when the job is
    // cancelled, and after ~100s without completion. (The original conflated a
    // 1-step and a 1000-step countdown on the same counter and signalled
    // timeout with an opaque assertEquals(1, 2).)
    def check_import_result = {checklabel, testTable4, expected_rows->
        def remainingSecs = 100
        while (true) {
            def result = sql "show load where label = '${checklabel}'"
            def state = result[0][2]
            if (state == "FINISHED") {
                sql "sync"
                def result_count = sql "select count(*) from ${testTable4}"
                // expected first, actual second
                assertEquals(expected_rows, result_count[0][0])
                break
            }
            if (state == "CANCELLED") {
                throw new IllegalStateException("load ${checklabel} was cancelled: ${result}")
            }
            sleep(1000)
            remainingSecs -= 1
            if (remainingSecs <= 0) {
                throw new IllegalStateException("timeout waiting for load ${checklabel}")
            }
        }
    }

    sql "DROP TABLE IF EXISTS ${testTable}"
    // Duplicate-key table covering date/datev2/datetimev2 (with precisions)
    // plus numeric and varchar columns.
    def result1 = sql """
            CREATE TABLE IF NOT EXISTS `${testTable}` (
                `event_day` date NULL COMMENT "",
                `event_day1` datev2 NULL COMMENT "",
                `event_day2` datetimev2 NULL COMMENT "",
                `event_day3` datetimev2(3) NULL COMMENT "",
                `event_day4` datetimev2(6) NULL COMMENT "",
                `siteid` int(11) NULL DEFAULT "10" COMMENT "",
                `citycode` smallint(6) NULL COMMENT "",
                `username` varchar(32) NULL DEFAULT "" COMMENT "",
                `pv` bigint(20) NULL DEFAULT "0" COMMENT ""
                ) ENGINE=OLAP
                DUPLICATE KEY(`event_day`, `event_day1`, `event_day2`, `event_day3`, `event_day4`, `siteid`, `citycode`)
                COMMENT "OLAP"
                DISTRIBUTED BY HASH(`siteid`) BUCKETS 10
                PROPERTIES (
                "replication_allocation" = "tag.location.default: 1",
                "in_memory" = "false",
                "storage_format" = "V2"
                )
            """
    // [stream load] format=csv
    def label = UUID.randomUUID().toString()
    test_stream_load.call(testTable, label, format_csv, format_csv_file, expect_rows)

    // [stream load] format=csv_with_names
    label = UUID.randomUUID().toString()
    test_stream_load.call(testTable, label, format_csv_with_names, format_csv_with_names_file, expect_rows)

    // [stream load] format=csv_with_names_and_types
    label = UUID.randomUUID().toString()
    test_stream_load.call(testTable, label, format_csv_with_names_and_types, format_csv_with_names_and_types_file, expect_rows)

    sql "sync"
    // Each of the three stream loads contributed expect_rows rows.
    def result_count = sql "select count(*) from ${testTable}"
    assertEquals(result_count[0][0], expect_rows*3)

    if (enableHdfs()) {
        // HDFS-backed cases: broker-load the same three files in, then test
        // export / outfile paths further below.
        def hdfsUser = getHdfsUser()
        def brokerName =getBrokerName()
        def hdfsPasswd = getHdfsPasswd()
        def hdfsFs = getHdfsFs()
        // [broker load] format=csv; the table already holds expect_rows*3 from
        // the stream loads above, so expected counts below are cumulative.
        label = UUID.randomUUID().toString().replaceAll("-", "")
        def remote_csv_file = uploadToHdfs("csv_header_p0/csv.txt")
        def export_result = import_from_hdfs.call(testTable, label, remote_csv_file, format_csv, brokerName, hdfsUser, hdfsPasswd)
        check_import_result.call(label, testTable, expect_rows * 4)

        // [broker load] format=csv_with_names
        label = UUID.randomUUID().toString().replaceAll("-", "")
        remote_csv_file = uploadToHdfs("csv_header_p0/csv_with_names.txt")
        export_result = import_from_hdfs.call(testTable, label, remote_csv_file, format_csv_with_names, brokerName, hdfsUser, hdfsPasswd)
        check_import_result.call(label, testTable, expect_rows * 5)

        // [broker load] format=csv_with_names_and_types
        label = UUID.randomUUID().toString().replaceAll("-", "")
        remote_csv_file = uploadToHdfs("csv_header_p0/csv_with_names_and_types.txt")
        export_result = import_from_hdfs.call(testTable, label, remote_csv_file, format_csv_with_names_and_types, brokerName, hdfsUser, hdfsPasswd)
        check_import_result.call(label, testTable, expect_rows * 6)

        // Export `exportTable` to `hdfsPath` in the given format via an EXPORT
        // job identified by `exportLabel`. Only submits the job; completion is
        // polled separately with check_export_result.
        // NOTE(review): exportBrokerName is accepted but unused — the export
        // goes through "WITH HDFS" directly; confirm the parameter is needed.
        def export_to_hdfs = {exportTable, exportLabel, hdfsPath, exportFormat, exportBrokerName, exportUserName, exportPasswd->
            sql """ EXPORT TABLE ${exportTable} 
                TO "${hdfsPath}" 
                PROPERTIES ("label" = "${exportLabel}", "column_separator"=",","format"="${exportFormat}") 
                WITH HDFS ("username"="${exportUserName}", "password"="${exportPasswd}", "fs.defaultFS"="${context.config.otherConfigs.get('hdfsFs')}")
            """
        }

        // Poll `show export` until the job with `checklabel1` reaches FINISHED.
        // Fails fast when the job is cancelled, and after ~100s without
        // completion. (The original conflated a 1-step and a 1000-step
        // countdown on the same counter and signalled timeout with an opaque
        // assertEquals(1, 2).)
        def check_export_result = {checklabel1->
            def remainingSecs = 100
            while (true) {
                def result = sql "show export where label='${checklabel1}'"
                logger.info("${result}")
                def state = result[0][2]
                if (state == "FINISHED") {
                    break
                }
                if (state == "CANCELLED") {
                    throw new IllegalStateException("export ${checklabel1} was cancelled: ${result}")
                }
                sleep(1000)
                remainingSecs -= 1
                if (remainingSecs <= 0) {
                    throw new IllegalStateException("timeout waiting for export ${checklabel1}")
                }
            }
        }
        
        // Download the remote files under `remotePath` from HDFS into the
        // suite's local temp dir and return the list of local file paths.
        def download_from_hdfs = { String remotePath ->
            def cfg = context.config.otherConfigs
            def client = new Hdfs(cfg.get("hdfsFs"), cfg.get("hdfsUser"), localDataDir.getAbsolutePath())
            return client.downLoad(remotePath)
        }

        // Verify that the downloaded files together contain the expected number
        // of lines, accounting for header lines added by the format:
        // csv_with_names adds 1 line per file, csv_with_names_and_types adds 2.
        // Each file is deleted after its lines are counted.
        def check_download_result = { resultlist, fileFormat, expectedTotalRows ->
            def headerLinesPerFile = 0
            if (fileFormat == format_csv_with_names) {
                headerLinesPerFile = 1
            } else if (fileFormat == format_csv_with_names_and_types) {
                headerLinesPerFile = 2
            }
            int totalLines = 0
            resultlist.each { String oneFile ->
                totalLines += getTotalLine(oneFile)
                deleteFile(oneFile)
            }
            logger.info("${resultlist}")
            assertEquals(expectedTotalRows + headerLinesPerFile * resultlist.size(), totalLines)
        }

        // Snapshot the current row count; the export/outfile checks below
        // compare downloaded line counts against it.
        sql "sync"
        def resultCount = sql "select count(*) from ${testTable}"
        def currentTotalRows = resultCount[0][0]

        // export table to hdfs format=csv
        def hdfsDataDir = getHdfsDataDir()
        label = UUID.randomUUID().toString().replaceAll("-", "")
        export_to_hdfs.call(testTable, label, hdfsDataDir + "/" + label, format_csv, brokerName, hdfsUser, hdfsPasswd)
        check_export_result(label)
        def result = download_from_hdfs(label)
        logger.info("${result}")
        check_download_result(result, format_csv, currentTotalRows)

        // export table to hdfs format=csv_with_names
        label = UUID.randomUUID().toString().replaceAll("-", "")
        export_to_hdfs.call(testTable, label, hdfsDataDir + "/" + label, format_csv_with_names, brokerName, hdfsUser, hdfsPasswd)
        check_export_result(label)
        result = download_from_hdfs(label)
        check_download_result(result, format_csv_with_names, currentTotalRows)

        // export table to hdfs format=csv_with_names_and_types
        label = UUID.randomUUID().toString().replaceAll("-", "")
        export_to_hdfs.call(testTable, label, hdfsDataDir + "/" + label, format_csv_with_names_and_types, brokerName, hdfsUser, hdfsPasswd)
        check_export_result(label)
        result = download_from_hdfs(label)
        check_download_result(result, format_csv_with_names_and_types, currentTotalRows)
        
        // select out file to hdfs 
        // Run SELECT ... INTO OUTFILE, writing the table's rows to HDFS in the
        // given format. The statement is synchronous: it returns only after the
        // files have been written.
        // NOTE(review): outHdfsFs, outBroker and outPasswd are accepted but
        // unused — the statement authenticates via "hadoop.username" only.
        // NOTE(review): "\n" inside the triple-quoted GString is interpolated
        // to a real newline character before the SQL is sent — confirm that is
        // the intended line_delimiter value.
        def select_out_file = {outTable, outHdfsPath, outFormat, outHdfsFs, outBroker, outHdfsUser, outPasswd->
            sql "sync"
            sql """
                SELECT * FROM ${outTable}
                INTO OUTFILE "${outHdfsPath}"
                FORMAT AS ${outFormat}
                PROPERTIES
                (
                    "fs.defaultFS"="${context.config.otherConfigs.get('hdfsFs')}",
                    "column_separator" = ",",
                    "line_delimiter" = "\n",
                    "max_file_size" = "5MB",
                    "hadoop.username" = "${outHdfsUser}"
                )
            """
        }
        // select out file to hdfs format=csv
        label = UUID.randomUUID().toString().replaceAll("-", "")
        // NOTE(review): the outfile path suffixes the label with "csv" while
        // the download below passes the bare label — confirm Hdfs.downLoad
        // matches by prefix, otherwise these downloads may miss the files.
        select_out_file(testTable, hdfsDataDir + "/" + label + "csv", format_csv, hdfsFs, brokerName, hdfsUser, hdfsPasswd)
        result = download_from_hdfs(label)
        check_download_result(result, format_csv, currentTotalRows)

        // select out file to hdfs format=csv_with_names
        label = UUID.randomUUID().toString().replaceAll("-", "")
        select_out_file(testTable, hdfsDataDir + "/" + label + "csv", format_csv_with_names, hdfsFs, brokerName, hdfsUser, hdfsPasswd)
        result = download_from_hdfs(label)
        check_download_result(result, format_csv_with_names, currentTotalRows)

        // select out file to hdfs format=csv_with_names_and_types
        label = UUID.randomUUID().toString().replaceAll("-", "")
        select_out_file(testTable, hdfsDataDir + "/" + label + "csv", format_csv_with_names_and_types, hdfsFs, brokerName, hdfsUser, hdfsPasswd)
        result = download_from_hdfs(label)
        check_download_result(result, format_csv_with_names_and_types, currentTotalRows)
    }    

    // Remove the local temp dir created at the start of the suite.
    localDataDir.deleteDir()
}

