/*
 * Copyright (C) 2019 Qunar, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

package qunar.tc.bistoury.commands.arthas;

import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import qunar.tc.bistoury.agent.common.ResponseHandler;
import qunar.tc.bistoury.agent.common.job.ContinueResponseJob;
import qunar.tc.bistoury.commands.arthas.telnet.Telnet;
import qunar.tc.bistoury.commands.arthas.telnet.TelnetStore;
import qunar.tc.bistoury.common.BistouryConstants;
import qunar.tc.bistoury.common.NamedThreadFactory;
import qunar.tc.bistoury.remoting.netty.AgentRemotingExecutor;
import qunar.tc.bistoury.remoting.netty.Task;

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Set;
import java.util.concurrent.Executors;

/**
 * @author zhenyu.nie created on 2018 2018/10/15 18:55
 */
public class ArthasTask implements Task {

    private static final Logger logger = LoggerFactory.getLogger(ArthasTask.class);

    // Dedicated single-thread executor for shutdown/stop commands so they are not
    // queued behind (or starved by) ordinary agent commands on the shared executor.
    private static final ListeningExecutorService SHUTDOWN_EXECUTOR = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor(new NamedThreadFactory("shutdown_attach")));

    private static final Set<String> SHUTDOWN_COMMANDS = ImmutableSet.of(BistouryConstants.SHUTDOWN_COMMAND, BistouryConstants.STOP_COMMAND);

    // Sentinel marking the end of a complete arthas JSON response. The suffix check is
    // done at the byte level: the sentinel is pure ASCII, so comparing raw bytes is
    // equivalent to the former String.endsWith() check under any ASCII-compatible
    // platform charset, and avoids re-decoding the whole buffer on every read.
    private static final byte[] RESPONSE_END_MARK = "\"message\":null}}".getBytes(StandardCharsets.US_ASCII);

    private final TelnetStore telnetStore;

    // Task id, echoed back through getId() for job/response correlation.
    private final String id;

    // Upper bound on task runtime, exposed via getMaxRunningMs().
    private final long maxRunningMs;

    // Pid of the target JVM the telnet session attaches to.
    private final int pid;

    // Raw command line forwarded to arthas over telnet.
    private final String command;

    // Receives the fully-accumulated response bytes (see doResponse()).
    private final ResponseHandler handler;

    // Completed with 0 on success, an exception on error, or cancelled.
    private final SettableFuture<Integer> future = SettableFuture.create();

    public ArthasTask(TelnetStore telnetStore, String id, long maxRunningMs, int pid, String command, ResponseHandler handler) {
        this.telnetStore = telnetStore;
        this.id = id;
        this.maxRunningMs = maxRunningMs;
        this.pid = pid;
        this.command = command;
        this.handler = handler;
    }

    @Override
    public String getId() {
        return id;
    }

    @Override
    public long getMaxRunningMs() {
        return maxRunningMs;
    }

    /**
     * Creates the job that runs this command. Shutdown/stop commands run on their own
     * single-thread executor; everything else runs on the shared agent executor.
     */
    @Override
    public ContinueResponseJob createJob() {
        if (isShutdownCommand(command.trim())) {
            return new Job(SHUTDOWN_EXECUTOR);
        } else {
            return new Job(AgentRemotingExecutor.getExecutor());
        }
    }

    @Override
    public ListenableFuture<Integer> getResultFuture() {
        return future;
    }

    private boolean isShutdownCommand(String realCommand) {
        return SHUTDOWN_COMMANDS.contains(realCommand);
    }

    /**
     * A {@link ByteArrayOutputStream} that can check its own suffix in place, without
     * copying the accumulated bytes ({@code buf}/{@code count} are protected fields).
     */
    private static final class ResponseBuffer extends ByteArrayOutputStream {

        /** Returns true if the bytes written so far end with {@code suffix}. */
        boolean endsWith(byte[] suffix) {
            if (count < suffix.length) {
                return false;
            }
            int offset = count - suffix.length;
            for (int i = 0; i < suffix.length; i++) {
                if (buf[offset + i] != suffix[i]) {
                    return false;
                }
            }
            return true;
        }
    }

    private class Job implements ContinueResponseJob {

        private final ListeningExecutorService executor;

        private Telnet telnet;

        private Job(ListeningExecutorService executor) {
            this.executor = executor;
        }

        @Override
        public String getId() {
            return id;
        }

        /** Opens the telnet session for the target pid and sends the command. */
        @Override
        public void init() throws Exception {
            telnet = telnetStore.getTelnet(pid);
            telnet.write(command);
        }

        /**
         * Reads the COMPLETE response for this command before handing it to the
         * frontend in a single chunk.
         * <p>
         * IMPORTANT: an earlier implementation forwarded each read() chunk as soon as
         * it arrived. That worked for a single breakpoint, but with several breakpoints
         * responding concurrently the interleaved network delivery scrambled the chunk
         * order on the frontend. Accumulating everything and sending once avoids that.
         * <p>
         * NOTE(review): {@code synchronized} is kept defensively; it is unclear from
         * this class alone whether doResponse() can be invoked concurrently — confirm
         * with the job runner before removing it.
         *
         * @return always {@code true} (response handled)
         * @throws Exception if reading from the telnet session fails
         */
        @Override
        public synchronized boolean doResponse() throws Exception {
            ResponseBuffer buffer = new ResponseBuffer();
            while (true) {
                byte[] bytes = telnet.read();
                if (bytes == null) {
                    break;
                }

                if (bytes.length > 0) {
                    buffer.write(bytes, 0, bytes.length);
                }

                // TODO: "list all classes" requests often block here. After JarDebugCommand
                // has produced the full class data, the next telnet.read() sometimes blocks
                // instead of returning null (suspected causes: thread deadlock, or the
                // end-of-output marker is never matched so read() cannot terminate).
                // As a stopgap we break out once the payload ends with the response
                // sentinel. This is a workaround — the root cause of the blocking read
                // still needs investigation.
                if (buffer.endsWith(RESPONSE_END_MARK)) {
                    break;
                }
            }

            // Deliver the whole response at once; see the method javadoc for why the
            // data must not be split into multiple sends.
            handler.handle(buffer.toByteArray());
            return true;
        }

        @Override
        public void clear() {
            if (telnet != null) {
                telnet.close();
            }
        }

        @Override
        public void finish() throws Exception {
            future.set(0);
        }

        @Override
        public void error(Throwable t) {
            future.setException(t);
        }

        @Override
        public void cancel() {
            future.cancel(true);
        }

        @Override
        public ListeningExecutorService getExecutor() {
            return executor;
        }
    }
}
