package com.lagou.zkboot.web.config;

import com.alibaba.druid.pool.DruidDataSource;
import com.lagou.zkboot.web.utils.JsonMapper;
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.NodeCache;
import org.apache.curator.framework.recipes.cache.NodeCacheListener;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.CreateMode;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.sql.DataSource;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;

/**
 * Manages a Druid {@link DataSource} whose configuration lives in ZooKeeper.
 *
 * <p>On startup it seeds the {@code /config} node (if absent) with a default
 * JSON config, builds the initial connection pool from the node's contents,
 * and registers a {@link NodeCache} listener that hot-swaps the pool whenever
 * the node changes. Config updates can be pushed via
 * {@link #updateDataSourceConfig(DataSourceConfig)}.
 *
 * <p>Thread-safety: {@link #dataSource} and {@link #dataSourceConfig} are
 * written from the Curator event thread and read from request threads, so
 * both are {@code volatile}.
 */
@Component
public class DataSourceUtils {

    /** ZK node (relative to the "data-source" namespace) holding the JSON config. */
    private static final String CONFIG_PATH = "/config";

    /** Default config used only to seed CONFIG_PATH when it does not exist yet. */
    private static final String DEFAULT_CONFIG =
            "{\"url\":\"jdbc:mysql://127.0.0.1:3306/test\",\"username\":\"root\",\"password\":\"root\",\"maxActive\":5,\"initialSize\":3,\"minIdle\":2}";

    private CuratorFramework client;

    private NodeCache nodeCache;

    // volatile: swapped by the ZK watcher thread, read by web threads.
    private volatile DataSource dataSource;

    private volatile DataSourceConfig dataSourceConfig;

    /**
     * Builds and starts a Curator client against the local ZooKeeper server.
     * All paths used by this class are relative to the "data-source" namespace.
     *
     * @return a started {@link CuratorFramework} client
     */
    public CuratorFramework startZkClient() {
        RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("127.0.0.1:2181")   // ZK server地址 — TODO externalize to application properties
                .sessionTimeoutMs(5000)
                .connectionTimeoutMs(3000)
                .retryPolicy(retryPolicy)
                .namespace("data-source")
                .build();
        client.start();
        return client;
    }

    /**
     * Bootstraps the ZK client, seeds the config node if missing, builds the
     * initial pool, and starts watching for config changes.
     *
     * @throws Exception if ZK access or pool initialization fails
     */
    @PostConstruct
    public void init() throws Exception {
        client = startZkClient();

        // Seed the node only when absent. Blindly calling setData() would both
        // throw NoNodeException on a fresh ensemble and clobber operator-edited
        // config on every application restart.
        if (client.checkExists().forPath(CONFIG_PATH) == null) {
            client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT)
                    .forPath(CONFIG_PATH, DEFAULT_CONFIG.getBytes(StandardCharsets.UTF_8));
        }

        byte[] bytes = client.getData().forPath(CONFIG_PATH);
        initDataSource(getDataSourceConfig(bytes));

        nodeCache = new NodeCache(client, CONFIG_PATH);
        nodeCache.start();
        nodeCache.getListenable().addListener(new NodeCacheListener() {
            @Override
            public void nodeChanged() throws Exception {
                // getCurrentData() is null when the node was deleted; keep the
                // existing pool instead of NPE-ing on the watcher thread.
                ChildData current = nodeCache.getCurrentData();
                if (current == null || current.getData() == null) {
                    return;
                }
                reloadDataSource(getDataSourceConfig(current.getData()));
            }
        });
    }

    /**
     * Publishes a new config to ZooKeeper; the NodeCache listener performs the
     * actual pool swap asynchronously.
     *
     * @param dataSourceConfig the config to publish (serialized as UTF-8 JSON)
     * @throws Exception if the ZK write fails
     */
    public void updateDataSourceConfig(DataSourceConfig dataSourceConfig) throws Exception {
        client.setData().forPath(CONFIG_PATH,
                JsonMapper.getInstance().toJson(dataSourceConfig).getBytes(StandardCharsets.UTF_8));
    }

    /** Deserializes a UTF-8 JSON payload read from ZK into a {@link DataSourceConfig}. */
    private DataSourceConfig getDataSourceConfig(byte[] bytes) {
        String s = new String(bytes, StandardCharsets.UTF_8);
        return JsonMapper.getInstance().fromJson(s, DataSourceConfig.class);
    }

    /**
     * Builds and initializes a Druid pool from the given config and publishes
     * it via the volatile fields. Only publishes after init() succeeds.
     *
     * @throws SQLException if the pool cannot be initialized
     */
    private void initDataSource(DataSourceConfig dataSourceConfig) throws SQLException {
        DruidDataSource druidDataSource = new DruidDataSource();
        druidDataSource.setUrl(dataSourceConfig.getUrl());
        druidDataSource.setUsername(dataSourceConfig.getUsername());
        druidDataSource.setPassword(dataSourceConfig.getPassword());
        druidDataSource.setMaxActive(dataSourceConfig.getMaxActive());
        druidDataSource.setInitialSize(dataSourceConfig.getInitialSize());
        druidDataSource.setMinIdle(dataSourceConfig.getMinIdle());
        druidDataSource.init();
        System.out.println("数据库初始化成功");
        this.dataSource = druidDataSource;
        this.dataSourceConfig = dataSourceConfig;
    }

    /**
     * Hot-swaps the pool: initializes the NEW pool first, and only closes the
     * old one once the new pool is live. (Closing first would leave the app
     * with a dead DataSource if the new config is broken.)
     *
     * @throws SQLException if the new pool cannot be initialized; the old pool
     *                      stays in service in that case
     */
    private void reloadDataSource(DataSourceConfig dataSourceConfig) throws SQLException {
        DataSource old = this.dataSource;
        initDataSource(dataSourceConfig);
        if (old instanceof DruidDataSource) {
            ((DruidDataSource) old).close();
            System.out.println("数据库关闭成功");
        }
    }

    /** Releases the watcher, the pool, and the ZK client on shutdown. */
    @PreDestroy
    public void destroy() {
        if (nodeCache != null) {
            try {
                nodeCache.close();
            } catch (IOException ignored) {
                // best-effort cleanup on shutdown
            }
        }
        if (dataSource instanceof DruidDataSource) {
            ((DruidDataSource) dataSource).close();
        }
        if (client != null) {
            client.close();
        }
    }

    public DataSource getDataSource() {
        return dataSource;
    }

    public DataSourceConfig getDataSourceConfig() {
        return dataSourceConfig;
    }
}
