# encoding: utf-8
"""
UCloud resources
============
"""

import gzip
import logging
import os
import sys
import time
import traceback
import zipfile

import sqlalchemy
from flask_restplus import Resource

from app import App
from app.extensions import db
from app.extensions.api import Namespace, abort, http_exceptions
# Wildcard order preserved deliberately: names exported by app.modules.common
# override same-named exports of app.modules.common.pipelines.
from app.modules.common.pipelines import *
from app.modules.common import *
from app.modules.common.utils import path_modify_time
from app.modules.database.pipelines_factory import DatabasePipelinesFactory
from config import BeetleConfig

log = logging.getLogger(__name__) # pylint: disable=invalid-name
api = Namespace('database', description="database") # pylint: disable=invalid-name

from . import parameters, schemas
from .models import DataBase


@api.route('/import')
class DbImport(Resource):
    """Trigger a one-off database import pipeline."""

    @api.parameters(parameters.DbImportParameters())
    @api.response(code=http_exceptions.Conflict.code)
    def put(self, args):
        """Start the import pipeline named after the target database.

        ``pipeline_manager`` comes from the wildcard imports at the top of
        the module -- presumably app.modules.common.pipelines (TODO confirm).
        """
        dump_path = args['dump_path']
        db_type = args['db_type']
        db_name = args['db_name']

        log.info("********* start import database *********")
        log.info("********* param[dump_path] is %s", dump_path)
        log.info("********* param[db_type] is %s", db_type)
        log.info("********* param[db_name] is %s", db_name)

        # Only db_name feeds the pipeline; dump_path/db_type are logged for
        # the audit trail.
        pipeline_manager.start_pipeline(db_name, pipeline_params=[])

        return None


@api.route('/reimport')
class DbReImport(Resource):
    # Renamed from the duplicated ``DbImport`` -- three resource classes in
    # this module shared that name, each definition shadowing the previous
    # one at module level.  Routing is unaffected: flask-restplus registers
    # the class with the namespace at decoration time, not by module name.

    @api.parameters(parameters.DbReImportParameters())
    @api.response(code=http_exceptions.Conflict.code)
    def put(self, args):
        """Re-submit the import pipeline for every enabled app matching the
        given zone / db_type / db_name.

        ``data_from == 'product'`` selects the production dump variant;
        anything else falls back to the demo dump.
        """
        zone = str(args['zone'])
        db_type = str(args['db_type'])
        db_name = str(args['db_name'])
        data_from = str(args['data_from'])
        log.info("********* param[zone] is %s", zone)
        log.info("********* param[db_type] is %s", db_type)
        log.info("********* param[db_name] is %s", db_name)
        log.info("********* param[data_from] is %s", data_from)

        # 'product' is the only value that selects production dumps.
        is_product = data_from == 'product'

        apps = (App.query.filter_by(disabled=0)
                .filter(App.zone_code == zone,
                        App.db_name == db_name,
                        App.db_type == db_type)
                .order_by(App.starting_order.asc())
                .all())

        # Empty result set simply submits nothing (former ``if apps:`` guard
        # was redundant).
        for app in apps:
            pipeline_name = DatabasePipelinesFactory.get_pipeline_name(
                app.app_id, app.db_type, app.db_name, is_product=is_product)
            log.info('***> Start submit pipeline : %s', pipeline_name)
            pipeline_manager.start_pipeline(pipeline_name=pipeline_name)

        # NOTE: app restart pipelines ('pip:<app_id>_restart') used to be
        # resubmitted here after the imports; re-add if that behaviour is
        # wanted again.

        return None


@api.route('/updateDumpTime')
class DbUpdateDumpTime(Resource):
    # Renamed from the duplicated ``DbImport`` (see /reimport); routing is
    # driven by the decorator, so the rename is caller-transparent.

    @api.parameters(parameters.UpdateDumpTimeParameters())
    @api.response(code=http_exceptions.Conflict.code)
    def put(self, args):
        """Refresh ``App.db_dump_create_date`` from each dump directory's
        modification time.

        For every enabled app with a db binding, stamp every app sharing the
        same (db_type, db_name) with the dump directory's mtime formatted as
        ``YYYYmmdd-HHMMSS``.  Directories with no readable mtime are skipped.
        """
        data_from = str(args['data_from'])
        log.info("********* param[data_from] is %s", data_from)

        is_product = data_from == 'product'

        apps = App.query.filter_by(disabled=0).order_by(App.starting_order.asc()).all()
        for app in apps:
            if not (app.db_name and app.db_type):
                continue

            # ADempiere dumps live under the 'ERP' directory on disk.
            dir_name = 'ERP' if str(app.db_name).upper() == 'ADEMPIERE' else app.db_name

            base_path = (BeetleConfig.DUMP_PRODUCT_BASE_PATH if is_product
                         else BeetleConfig.DUMP_DEMO_BASE_PATH)
            dump_path = base_path + '/' + app.db_type + '/' + dir_name

            m_time = path_modify_time(dump_path)
            if not m_time:
                continue

            db_dump_create_date = time.strftime('%Y%m%d-%H%M%S', m_time)
            try:
                new_apps = (App.query.filter_by(disabled=0)
                            .filter_by(db_type=app.db_type)
                            .filter_by(db_name=app.db_name)
                            .order_by(App.starting_order.asc())
                            .all())

                for _app in new_apps:
                    log.info('---------------------> %s', _app.app_name)
                    _app.db_dump_create_date = db_dump_create_date
                db.session.commit()
            except Exception as e:
                # Roll back so the session is usable on the next iteration --
                # a failed commit otherwise leaves it in an aborted state.
                db.session.rollback()
                # ``e.message`` is deprecated (and gone in Python 3); %s on
                # the exception itself is equivalent for stdlib exceptions.
                log.error(u'更新数据库dump文件创建时间失败，失败原因：%s', e)
                abort(1, u'更新数据库dump文件时间失败, 失败原因：%s', e)
        return None


@api.route('/check')
class DbDumpCheck(Resource):
    """Health-check the on-disk database dump files for every enabled app."""

    @api.parameters(parameters.DumpCheckParameters())
    @api.response(code=http_exceptions.Conflict.code)
    def get(self, args):
        """Return one status dict per distinct (db_name, db_type) pair.

        Each dict has keys 'name', 'type', 'dump_date' (formatted mtime or
        None) and 'dump_status' (human-readable check result).
        """
        data_from = str(args['data_from'])
        log.info("********* param[data_from] is %s", data_from)

        is_product = data_from == 'product'

        apps = App.query.filter_by(disabled=0).order_by(App.starting_order.asc()).all()
        checked_db_name = set()
        checked_db = []
        for app in apps:
            if not (app.db_name and app.db_type):
                continue

            # ADempiere dumps live under the 'ERP' directory on disk.
            dir_name = 'ERP' if str(app.db_name).upper() == 'ADEMPIERE' else app.db_name

            # Check each (db_name, db_type) combination only once.
            key = str(app.db_name + ':' + app.db_type)
            if key in checked_db_name:
                continue
            checked_db_name.add(key)

            # Renamed from ``db``: the old local shadowed the module-level
            # SQLAlchemy ``db`` session object.
            db_info = {'name': app.db_name, 'type': app.db_type}
            if is_product:
                dump_path = BeetleConfig.DUMP_PRODUCT_BASE_PATH + '/' + app.db_type + '/' + dir_name
            else:
                dump_path = BeetleConfig.DUMP_DEMO_BASE_PATH + '/' + app.db_type + '/' + dir_name

            m_time = path_modify_time(dump_path)
            if m_time:
                db_info['dump_date'] = time.strftime('%Y%m%d-%H%M%S', m_time)
            else:
                db_info['dump_date'] = None

            # Only the message is surfaced to the caller; the boolean is for
            # internal short-circuiting.
            _flag, msg = self._check_dump(app.db_name, app.db_type, dump_path)
            db_info['dump_status'] = msg

            checked_db.append(db_info)
        return checked_db

    def _check_dump(self, db_name, db_type, dump_path):
        """Dispatch to the type-specific dump check.  Returns (ok, message)."""
        flag, msg = self._check_dump_file_exists(dump_path)
        if not flag:
            return flag, msg

        flag, msg = self._check_file_size(dump_path)
        if not flag:
            return flag, msg

        db_type_lower = str(db_type).lower()
        if db_type_lower == 'mysql':
            return self._check_mysql_dump(db_name, dump_path)
        elif db_type_lower == 'oracle':
            return self._check_oracle_dump(dump_path)
        elif db_type_lower == 'cassandra':
            return self._check_cassandra_dump(dump_path)
        return False, '未知数据库类型[%s]，无法检测' % db_type

    def _check_mysql_dump(self, db_name, dump_path):
        """A MySQL dump needs <db>.schema.sql, <db>.tabs.txt and at least
        one readable ``*.sql.gz`` table file."""
        file_name_list = os.listdir(dump_path)

        schema_file_name = '%s.schema.sql' % db_name
        tables_txt_file_name = '%s.tabs.txt' % db_name

        if schema_file_name not in file_name_list:
            return False, '缺少文件:%s' % schema_file_name

        gz_files = [f for f in file_name_list if f.lower().endswith('.sql.gz')]
        if not gz_files:
            return False, '未找到可用的表数据'

        chunk_size = 2 ** 20
        for file_name in gz_files:
            dump_file = dump_path + '/' + file_name
            # Fully decompress to prove the gzip stream is intact.  The open
            # now sits inside the outer try: previously a failing open left
            # ``gz`` unbound and the ``finally`` raised NameError, masking
            # the intended error return.
            try:
                gz = gzip.open(dump_file, mode='r')
                try:
                    while gz.read(chunk_size):
                        pass
                finally:
                    gz.close()
            except Exception:
                return False, '文件%s不完整' % file_name

        if tables_txt_file_name not in file_name_list:
            return False, '缺少文件:%s' % tables_txt_file_name

        return True, '正常'

    def _check_oracle_dump(self, dump_path):
        """An Oracle dump is exactly one zip archive that passes testzip().

        _check_dump_file_exists already guaranteed at least one entry, so
        indexing [0] below is safe on the path through _check_dump.
        """
        file_name_list = os.listdir(dump_path)

        if len(file_name_list) > 1:
            return False, '数据文件大于1个'

        dump_file = file_name_list[0]
        if not dump_file.lower().endswith('.zip'):
            return False, '数据文件格式错误: %s' % dump_file

        # Close the archive deterministically (the original leaked the
        # handle).  testzip() returns the first bad member name, or None.
        zf = zipfile.ZipFile(dump_path + '/' + dump_file)
        try:
            bad_member = zf.testzip()
        finally:
            zf.close()

        if bad_member:
            return False, '压缩文件%s不完整' % dump_file

        return True, '正常'

    def _check_cassandra_dump(self, dump_path):
        """A Cassandra dump needs the keyspace schema plus >=1 data file."""
        file_name_list = os.listdir(dump_path)
        if '00_keyspace_schema.cql' not in file_name_list:
            return False, '缺少文件：00_keyspace_schema.cql'

        if any(f.endswith('data.csv.gz') for f in file_name_list):
            return True, '正常'
        return False, '未找到可用的表数据'

    def _check_dump_file_exists(self, dump_path):
        """True when the dump directory exists and is non-empty."""
        if os.path.exists(dump_path) and os.listdir(dump_path):
            return True, '正常'
        return False, '未找到可用的数据文件'

    def _check_file_size(self, dump_path):
        """Reject dumps containing zero-byte files."""
        for filename in os.listdir(dump_path):
            if os.path.getsize(dump_path + '/' + filename) == 0:
                return False, '文件%s不完整' % filename

        return True, '正常'

