package com.pushtime.web.rest.search;

import com.pushtime.domain.HdfsLog;
import com.pushtime.repository.HdfsLogRepository;
import com.pushtime.service.search.HdfsUploadService;
import com.pushtime.web.rest.vm.HdfsVM;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.task.TaskExecutor;
import org.springframework.http.*;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.client.RestTemplate;
import javax.validation.Valid;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;

@RestController
@RequestMapping("/api/search")
public class HdfsUploadResource {

    /** HDFS chunk (block) size in bytes for resumable upload; bound from configuration. */
    @Value("${application.webhdfs.blockSize}")
    private Integer LENGTH;

    private final HdfsUploadService hdfsUploadService;

    private final TaskExecutor taskExecutor;

    private final HdfsLogRepository hdfsLogRepository;

    private final RestTemplate restTemplate;

    /** Base URL of the WebHDFS endpoint. */
    @Value("${application.webhdfs.url}")
    private String url;

    /** Target path on HDFS for uploaded files. */
    @Value("${application.webhdfs.path}")
    private String path;

    public HdfsUploadResource(HdfsUploadService hdfsUploadService,
                              TaskExecutor taskExecutor, HdfsLogRepository hdfsLogRepository,
                              RestTemplate restTemplate) {
        this.hdfsUploadService = hdfsUploadService;
        this.taskExecutor = taskExecutor;
        this.hdfsLogRepository = hdfsLogRepository;
        this.restTemplate = restTemplate;
    }

    /**
     * POST /api/search/write-files : create an HDFS upload job and, if the service
     * accepted it (state == 1), stream the source file to HDFS asynchronously in
     * {@code LENGTH}-byte chunks via the task executor.
     *
     * @param hdfsVM upload request (validated) describing the source file
     * @return 200 OK with the created {@link HdfsLog}; the upload itself continues
     *         in the background and its progress/outcome is recorded on the log entity
     */
    @PostMapping("/write-files")
    public ResponseEntity<HdfsLog> createHdfsDownload(@RequestBody @Valid HdfsVM hdfsVM) {
        HdfsLog hdfsLog = hdfsUploadService.createHdfsUpload(hdfsVM);
        if (hdfsLog.getState() == 1) {
            // Run the chunked (resumable) upload on the thread pool so the HTTP
            // request returns immediately.
            taskExecutor.execute(() -> {
                File file = new File(hdfsLog.getSrc());
                if (file.exists() && file.isFile()) {
                    // try-with-resources guarantees the stream is closed even if
                    // execute(...) throws mid-upload (the original leaked it here).
                    try (FileInputStream fileInputStream = new FileInputStream(file)) {
                        // Read and push the file chunk by chunk; the service appends
                        // each chunk to the WebHDFS target.
                        byte[] bytes = new byte[LENGTH];
                        int length;
                        while ((length = fileInputStream.read(bytes, 0, bytes.length)) != -1) {
                            hdfsUploadService.execute(hdfsLog, bytes, length);
                        }
                    } catch (IOException e) {
                        // Mark the job as failed (state 2) so callers polling the
                        // log can observe the error.
                        hdfsLog.setState(2);
                        hdfsLog.setDesc("read file error");
                        hdfsLogRepository.save(hdfsLog);
                    }
                }
                // NOTE(review): if the source file does not exist, the job silently
                // stays in state 1 — consider recording a failure state here too.
            });
        }
        return ResponseEntity.ok().body(hdfsLog);
    }


}
