package com.pushtime.service.impl.search;

import com.pushtime.domain.HdfsLog;
import com.pushtime.repository.HdfsLogRepository;
import com.pushtime.service.search.HdfsUploadService;
import com.pushtime.web.rest.vm.DataVM;
import com.pushtime.web.rest.vm.HdfsVM;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.*;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.client.RestTemplate;

import java.io.*;
import java.net.URI;
import java.time.Instant;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

@Service
@Transactional
public class HdfsUploadServiceImpl implements HdfsUploadService {

    // WebHDFS chunk (resume-point) size for uploads, injected from configuration.
    @Value("${application.webhdfs.blockSize}")
    private Integer blockSize;

    // Base URL of the WebHDFS REST gateway, e.g. http://namenode:50070/webhdfs/v1.
    @Value("${application.webhdfs.url}")
    private String url;

    // Remote base directory that destination files are created under.
    @Value("${application.webhdfs.path}")
    private String path;

    private final RestTemplate restTemplate;

    private final HdfsLogRepository hdfsLogRepository;

    public HdfsUploadServiceImpl(RestTemplate restTemplate, HdfsLogRepository hdfsLogRepository) {
        this.restTemplate = restTemplate;
        this.hdfsLogRepository = hdfsLogRepository;
    }

    /**
     * Creates the destination file on HDFS via the WebHDFS two-step CREATE protocol
     * (PUT to the namenode, then a second PUT to the datanode URL returned in the
     * 307 redirect's {@code Location} header) and records an upload log entry.
     * The file is created empty here; content is pushed chunk-by-chunk afterwards
     * via {@link #execute(HdfsLog, byte[], int)}.
     *
     * @param hdfsVM source/destination file description
     * @return the persisted {@link HdfsLog}; state 1 = redirect obtained, 2 = failed
     */
    @Override
    public HdfsLog createHdfsUpload(HdfsVM hdfsVM) {
        // Initial bookkeeping for this transfer.
        HdfsLog hdfsLog = new HdfsLog().src(hdfsVM.getSrcFile()).dst(hdfsVM.getDstFile())
            .createdDate(Instant.now()).process(0D).currentSize(0L).name("upload");
        try {
            File file = new File(hdfsVM.getSrcFile());
            if (file.exists() && file.isFile()) {
                hdfsLog.setTotalSize(file.length());
                // Step 1: ask the namenode where to write. RestTemplate's default
                // request factory does not follow 307 redirects for PUT, so the
                // Location header is handled manually below.
                HttpEntity<String> requestEntity = new HttpEntity<>("", new HttpHeaders());
                Map<String, Object> params = new HashMap<>();
                params.put("op", "CREATE");
                ResponseEntity<Object> exchangeEntity = restTemplate
                    .exchange(url + path + hdfsVM.getDstFile() + "?op={op}",
                        HttpMethod.PUT, requestEntity, Object.class, params);
                if (exchangeEntity.getStatusCode().equals(HttpStatus.TEMPORARY_REDIRECT)) {
                    hdfsLog.setState(1);
                    URI location = exchangeEntity.getHeaders().getLocation();
                    if (location != null) {
                        hdfsLog.setDesc(location.toString());
                        // Step 2: PUT to the datanode location with an empty body,
                        // which materializes the (still empty) file on HDFS.
                        HttpEntity<byte[]> sendEntity = new HttpEntity<>(null, new HttpHeaders());
                        restTemplate.exchange(location, HttpMethod.PUT, sendEntity, Object.class);
                        hdfsLog.setCurrentSize(0L);
                        // Guard against NaN progress when the source file is empty.
                        hdfsLog.setProcess(hdfsLog.getTotalSize() > 0
                            ? 100.0 * hdfsLog.getCurrentSize() / hdfsLog.getTotalSize()
                            : 0D);
                    }
                } else {
                    // Unexpected status from the namenode.
                    hdfsLog.setState(2);
                    hdfsLog.setDesc("error to connect hdfs server.");
                }
            } else {
                // Source file missing: record the failure.
                hdfsLog.setTotalSize(0L);
                hdfsLog.setState(2);
                hdfsLog.setDesc("file does not exist.");
            }
        } catch (Exception e) {
            hdfsLog.setState(2);
            hdfsLog.setTotalSize(0L);
            hdfsLog.setDesc(e.getMessage());
        }
        return hdfsLogRepository.save(hdfsLog);
    }

    /**
     * Runs an upload task.
     *
     * <p>NOTE(review): not implemented yet — always returns {@code null}.
     * Callers must be prepared for a null result until this is completed.
     *
     * @param hdfsLog task details
     * @param uri     datanode location to write to
     * @param offset  byte offset to resume from
     * @param length  number of bytes to transfer
     * @return the transfer result, currently always {@code null}
     */
    @Override
    public DataVM startUpload(HdfsLog hdfsLog, URI uri, long offset, int length) {

        return null;
    }

    /**
     * Appends one chunk to the destination file via the WebHDFS two-step APPEND
     * protocol (POST to the namenode, then POST the payload to the datanode URL
     * from the 307 redirect) and updates the persisted progress.
     *
     * @param hdfsLog upload log entry tracking the transfer; mutated and saved here
     * @param bytes   read buffer holding the chunk
     * @param length  number of valid bytes in {@code bytes} to append
     * @throws IOException declared by the interface; not thrown by this implementation
     */
    @Override
    public void execute(HdfsLog hdfsLog, byte[] bytes, int length) throws IOException {
        HttpEntity<String> requestEntity = new HttpEntity<>("", new HttpHeaders());
        Map<String, Object> params = new HashMap<>();
        params.put("op", "APPEND");
        // Step 1: POST to the namenode; the 307 redirect is handled manually.
        ResponseEntity<Object> exchangeEntity = restTemplate
            .exchange(url + path + hdfsLog.getDst() + "?op={op}",
                HttpMethod.POST, requestEntity, Object.class, params);
        if (exchangeEntity.getStatusCode().equals(HttpStatus.TEMPORARY_REDIRECT)) {
            hdfsLog.setState(1);
            URI location = exchangeEntity.getHeaders().getLocation();
            if (location != null) {
                hdfsLog.setDesc(location.toString());
                // Only the first `length` bytes of the buffer are valid (the last
                // read of a file is usually shorter than the buffer); trim before sending.
                byte[] payload = bytes.length > length ? Arrays.copyOf(bytes, length) : bytes;
                // Step 2: POST the chunk to the datanode location.
                restTemplate.exchange(location, HttpMethod.POST,
                    new HttpEntity<>(payload, new HttpHeaders()), Object.class);
                // Advance progress by the number of bytes actually sent (previously
                // always `length`, which overstated progress if the buffer was shorter).
                hdfsLog.setCurrentSize(hdfsLog.getCurrentSize() + Math.min(bytes.length, length));
                // Guard against NaN progress when totalSize is zero.
                hdfsLog.setProcess(hdfsLog.getTotalSize() > 0
                    ? 100.0 * hdfsLog.getCurrentSize() / hdfsLog.getTotalSize()
                    : 0D);
            }
        } else {
            // Unexpected status from the namenode.
            hdfsLog.setState(2);
            hdfsLog.setDesc("error to connect hdfs server.");
        }
        hdfsLogRepository.save(hdfsLog);
    }


    /**
     * Looks up the status of an upload task.
     *
     * @param id task id
     * @return the matching log entry, or empty if none exists
     */
    @Override
    public Optional<HdfsLog> getStatus(Long id) {
        return hdfsLogRepository.findById(id);
    }
}
