from asyncore import read
from datetime import datetime
import importlib
import json
from multiprocessing.spawn import prepare
import os
import sys

sys.path.append('/home/benoxo/picsad/benchmark/python/')

from data_reader.DataReader import DataReader
from data_reader.NsReader import NsReader
from data_reader.OrReader import OrReader
from utils.StatusThread import StatusThread
from utils.Timer import Timer

from cassandra.cluster import Cluster
from cassandra.query import BatchStatement
from cassandra.cqlengine.connection import ConsistencyLevel

class Cassandra:
    """Benchmark harness that bulk-loads point-cloud sweeps into Cassandra.

    One `load_test()` call runs three timed phases — Init (connect + schema),
    Load (batched inserts), Close (index creation) — while StatusThread
    instances sample resource usage of both this process and the Cassandra
    server process.
    """

    # Prefix for the timing/status output files written by this benchmark.
    pcm_prefix = "Cassandra_"
    # PID of the Cassandra server process to monitor.
    # NOTE(review): hard-coded; must match the locally running server — verify.
    cassandra_pid = 28387
    # Rows per batch sent to the cluster (was a magic 400 repeated inline).
    BATCH_SIZE = 400

    def __init__(self):
        # Per-run state. These were class attributes before, which are shared
        # across instances; instance attributes avoid state leaking between
        # benchmark runs (point_id in particular must restart at 0).
        self.session = None    # cassandra Session, set by prepare()
        self.table = ""        # target table name, set by load_test()
        self.data_dir = ""     # dataset directory of the current test case
        self.point_id = 0      # monotonically increasing primary key

    def drop_table(self):
        """Drop the point_cloud table and its secondary indexes if present."""
        self.session.execute('DROP TABLE IF EXISTS point_cloud')
        self.session.execute("DROP INDEX IF EXISTS pc_sweep_index")
        self.session.execute("DROP INDEX IF EXISTS pc_tstamp_index")

    def create_table(self):
        """Create the point_cloud table (one row per point)."""
        self.session.execute("CREATE TABLE point_cloud( "
                    "id BIGINT PRIMARY KEY,"
                    "sweep INT,"
                    "x    FLOAT,"
                    "y    FLOAT,"
                    "z    FLOAT,"
                    "tstamp TIMESTAMP)")

    def create_index(self):
        """Create secondary indexes on sweep and tstamp.

        Called from close(), i.e. after the bulk load, so inserts are not
        slowed down by index maintenance.
        """
        self.session.execute("CREATE INDEX pc_sweep_index ON point_cloud(sweep)")
        self.session.execute("CREATE INDEX pc_tstamp_index ON point_cloud(tstamp)")

    def load_test(self, test_case):
        """Run the full load benchmark for one test case.

        test_case: dict with keys
            "reader" -- class name of a data reader in the data_reader package
            "dir"    -- dataset directory; also used as the output directory
                        for timing and resource-usage files.
        """
        reader_name = test_case["reader"]
        data_dir = test_case["dir"]
        self.data_dir = data_dir
        self.table = 'point_cloud'
        # Must match the index actually created in create_index(); the old
        # value "point_cloud_tstamp_index" named an index that never existed.
        self.index_name = "pc_tstamp_index"

        # Resolve the reader class dynamically. importlib.import_module is
        # the idiomatic (and safer) replacement for building an import
        # statement string and exec()-ing it.
        reader_module = importlib.import_module("data_reader." + reader_name)
        reader_class = getattr(reader_module, reader_name)
        reader = reader_class(data_dir)

        timer = Timer()
        timer.Start()
        self.prepare()
        timer.StopAndRecord("Init")

        # Start the load phase: monitor both this process and the server.
        status = StatusThread(os.getpid(), self.pcm_prefix + "load", test_case["dir"])
        status.start()

        cassandra_status = StatusThread(self.cassandra_pid, "cassandra_load", test_case["dir"])
        cassandra_status.start()

        sweep = 0
        while reader.has_more_sweep():
            sweep += 1
            if sweep % 10 == 1:
                # Lightweight progress indicator (every 10th sweep).
                print(sweep)
            points = reader.read_sweep()
            tstamp = reader.read_timestamp()
            # Only the insert itself is timed; reading the sweep is excluded.
            timer.Start()
            self.load_data(sweep, points, tstamp)
            timer.Pause()

        timer.StopAndRecord("Load")
        status.end()
        cassandra_status.end()

        # Close phase: index creation, monitored separately.
        status = StatusThread(os.getpid(), self.pcm_prefix + "close", test_case["dir"])
        status.start()
        cassandra_status = StatusThread(self.cassandra_pid, "cassandra_close", test_case["dir"])
        cassandra_status.start()
        timer.Start()
        self.close()
        timer.StopAndRecord("Close")
        timer.PrintAll()
        timer.Save(test_case["dir"] + self.pcm_prefix + "time.txt")
        status.end()
        cassandra_status.end()

    def close(self):
        """Finish the benchmark: build the secondary indexes."""
        self.create_index()

    def prepare(self):
        """Connect to the local cluster and (re)create the picsad keyspace,
        table and a clean slate for the load."""
        cluster = Cluster()
        self.session = cluster.connect()
        self.session.execute('DROP KEYSPACE IF EXISTS picsad')
        self.session.execute("CREATE KEYSPACE picsad WITH replication = {'class':'SimpleStrategy', 'replication_factor': 1}")
        self.session.set_keyspace('picsad')
        self.drop_table()
        self.create_table()

    def load_data(self, sweep, points, tstamp):
        """Insert one sweep's points in batches of BATCH_SIZE rows.

        sweep  -- 1-based sweep number stored with every point
        points -- sequence of (x, y, z) triples
        tstamp -- timestamp stored with every point of this sweep

        Batches are submitted with execute_async; the futures are deliberately
        not awaited (fire-and-forget) to keep the load pipeline full, matching
        the original benchmark semantics.
        """
        # Prepare once per sweep; the old code re-prepared this statement for
        # every BATCH_SIZE-row batch inside the loop (loop-invariant work).
        insert_stmt = self.session.prepare("""
            INSERT INTO point_cloud (id, sweep, x, y, z, tstamp) VALUES (?, ?, ?, ?, ?, ?)""")
        for start in range(0, len(points), self.BATCH_SIZE):
            batch = BatchStatement(consistency_level=ConsistencyLevel.QUORUM)
            for p in points[start:start + self.BATCH_SIZE]:
                self.point_id += 1
                batch.add(insert_stmt, (self.point_id, sweep, p[0], p[1], p[2], tstamp))
            # range() guarantees the batch is non-empty here.
            self.session.execute_async(batch)


if __name__ == '__main__':
    # Read the benchmark configuration from the current working directory.
    config_path = os.path.join(os.getcwd(), 'config.json')
    with open(config_path, 'r') as config_file:
        config = json.load(config_file)

    # Required key; fails fast with a KeyError if the config is incomplete.
    data_base_path = config["data_base_path"]

    # Run the load benchmark once per configured test case.
    for case_name in config["test_case"]:
        case = config["collect_dataset"][case_name]
        Cassandra().load_test(case)