# coding=utf-8

"""
@author  zhongy
@date    2018-10-17
@desc    数据子集controller
"""
import os
import re
import json
from controller.controller_base import *
from dbs.models import SubDataSet
from controller.export_controller import ExportController
from controller.objsrv_controller import ObjSrvController
from conf.settings import settings
from flask import current_app

class SubDataSetController(ControllerBase):
    def __init__(self):
        # Delegate to ControllerBase, which is expected to provide self.db
        # (the SQLAlchemy handle used by every method below) — TODO confirm.
        super(SubDataSetController, self).__init__()

    # Add a data sub-set.
    def add(self, sub_set_name, data_set_id):
        """Create a SubDataSet row together with its backing private world.

        Args:
            sub_set_name: display name of the new sub-set.
            data_set_id:  id of the parent data set.

        Returns:
            True when the private world was created and the row committed,
            False otherwise (the pending insert is rolled back).
        """
        result = False
        sub_data_set = SubDataSet(sub_set_name=sub_set_name, data_set_id=data_set_id)
        self.db.session.add(sub_data_set)
        try:
            world_name = '%s-私有世界' % sub_set_name
            obj_srv = ObjSrvController()
            world_item = obj_srv.create_private_world(world_name)
            if world_item.get('private_space'):
                world_item['world_name'] = world_name
                sub_data_set.extend_info = world_item
                self.db.session.commit()
                result = True
            else:
                # World creation failed: discard the pending insert explicitly
                # instead of relying on session.close() to silently drop it.
                self.db.session.rollback()
        finally:
            # Always release the session, even if create_private_world raises.
            self.db.session.close()
        return result

    # Get the private-world space backing a sub-set.
    def get_space_id(self, sub_set_id):
        """Return the 'private_space' id stored in the sub-set's extend_info.

        Returns '' when the sub-set does not exist, has no extend_info, or
        extend_info lacks a 'private_space' key (previously that last case
        leaked None to the caller despite the '' sentinel).
        """
        space_id = ''
        sub_set_item = self.db.session.query(SubDataSet).filter(SubDataSet.sub_set_id == sub_set_id).first()
        if sub_set_item and sub_set_item.extend_info:
            # Default to '' so a missing key cannot turn the declared
            # empty-string sentinel into None.
            space_id = sub_set_item.extend_info.get('private_space', '')
        return space_id

    # Paged listing of data sub-sets.
    def list(self, sub_set_name=None, data_set_id=None, page_index=None, page_size=None):
        """Return (rows, pagination) for non-deleted sub-sets matching the filters.

        Args:
            sub_set_name: substring filter on the sub-set name; None matches
                          everything (previously the declared None default
                          raised TypeError in '%' + None + '%').
            data_set_id:  optional parent data-set filter.
            page_index / page_size: forwarded to paginate().

        Returns:
            (list of dicts, pagination dict).
        """
        # Treat a missing name as "match all" instead of crashing.
        name_pattern = '%' + (sub_set_name or '') + '%'
        query = self.db.session.query(SubDataSet.sub_set_id, SubDataSet.data_set_id,
                                      SubDataSet.sub_set_name, SubDataSet.extend_info,
                                      DataSet.data_set_name, SubDataSet.create_time).\
            outerjoin(DataSet, DataSet.data_set_id == SubDataSet.data_set_id).\
            filter(and_(SubDataSet.is_deleted == False, SubDataSet.sub_set_name.like(name_pattern)))
        if data_set_id is not None:
            query = query.filter(SubDataSet.data_set_id == data_set_id)
        # Ordering and pagination are identical in both branches of the
        # original if/elif, so apply them once.
        page = query.order_by(SubDataSet.create_time.desc()).paginate(page_index, page_size)

        pagination = {
            'page_size': page_size,
            'page_index': page_index,
            'total': page.total,
            'page_count': page.pages
        }

        result = []
        for item in page.items or []:
            result.append({
                'data_set_name': item.data_set_name,
                'sub_set_id': item.sub_set_id,
                'data_set_id': item.data_set_id,
                'sub_set_name': item.sub_set_name,
                'extend_info': item.extend_info,
                'create_time': item.create_time.strftime('%Y-%m-%d %H:%M:%S')
            })
        return result, pagination

    # Query all data sub-sets (unpaged).
    def all(self, sub_set_name=None, data_set_id=None):
        """Return every non-deleted sub-set matching the filters as dicts.

        sub_set_name is a substring filter; None matches all rows (previously
        the declared None default raised TypeError when building the LIKE
        pattern). data_set_id optionally restricts to one parent data set.
        """
        name_pattern = '%' + (sub_set_name or '') + '%'
        query = self.db.session.query(SubDataSet.sub_set_id, SubDataSet.data_set_id,
                                      SubDataSet.sub_set_name, SubDataSet.create_time,
                                      DataSet.data_set_name). \
            outerjoin(DataSet, DataSet.data_set_id == SubDataSet.data_set_id). \
            filter(and_(SubDataSet.is_deleted == False, SubDataSet.sub_set_name.like(name_pattern)))
        if data_set_id is not None:
            query = query.filter(SubDataSet.data_set_id == data_set_id)
        # Ordering is identical in both branches of the original if/elif.
        rows = query.order_by(SubDataSet.create_time.desc()).all()

        result = []
        for item in rows or []:
            result.append({
                'data_set_name': item.data_set_name,
                'sub_set_id': item.sub_set_id,
                'data_set_id': item.data_set_id,
                'sub_set_name': item.sub_set_name,
                'create_time': item.create_time.strftime('%Y-%m-%d %H:%M:%S')
            })
        return result

    # Update (rename / soft-delete) a data sub-set.
    def update(self, sub_set_id, sub_set_name=None, is_deleted=None):
        """Rename and/or soft-delete a sub-set; deletion also removes its world.

        Returns True on success, False when sub_set_id matches no row
        (previously a missing id crashed with AttributeError on None).
        """
        sub_data_set = self.db.session.query(SubDataSet).get(sub_set_id)
        if sub_data_set is None:
            self.db.session.close()
            return False
        if sub_set_name is not None:
            sub_data_set.sub_set_name = sub_set_name
        if is_deleted is not None:
            sub_data_set.is_deleted = is_deleted
            if is_deleted:
                # extend_info may legitimately be absent (add() only stores it
                # when world creation succeeded).
                extend_info = sub_data_set.extend_info or {}
                world_id = extend_info.get('private_world')
                if world_id:
                    # Pass the id through: the original called
                    # delete_private_world() with no argument, so the world_id
                    # it had just looked up was never used and no specific
                    # world was targeted. TODO confirm the helper's signature.
                    ObjSrvController().delete_private_world(world_id)

        self.db.session.commit()
        self.db.session.close()
        return True

    # Fetch one non-deleted sub-set by name within a given data set.
    def get_by_name(self, sub_set_name, data_set_id):
        """Return the matching SubDataSet row, or None when absent."""
        # Chained filter() calls are ANDed by SQLAlchemy, equivalent to the
        # single and_() expression.
        query = self.db.session.query(SubDataSet)
        query = query.filter(SubDataSet.is_deleted == False)
        query = query.filter(SubDataSet.sub_set_name == sub_set_name)
        query = query.filter(SubDataSet.data_set_id == data_set_id)
        return query.first()

    def get_export_list(self, page_size, page_index):
        """Return one page of export records, each enriched with its sub-set name.

        Args:
            page_size:  rows per page.
            page_index: page number, forwarded to paginate().

        Returns:
            dict with 'export_list' (serialized records) and 'pagination'.
        """
        exports = self.db.session.query(ExportRecord). \
            order_by(ExportRecord.create_time.desc()).paginate(page_index, page_size, False)

        export_list = []
        for export in exports.items:
            # NOTE(review): one lookup per record (N+1); acceptable for small
            # pages, consider a join if page sizes grow.
            sub = self.db.session.query(SubDataSet).filter(SubDataSet.sub_set_id == export.sub_set_id).first()
            export_dict = self.serialize(export)
            # The referenced sub-set may have been removed; previously a
            # missing row crashed with AttributeError on None.
            export_dict['sub_set_name'] = sub.sub_set_name if sub else None
            export_list.append(export_dict)

        return {
            'export_list': export_list,
            'pagination': {
                'page_index': page_index,
                'page_size': page_size,
                'page_count': exports.pages,
                'total': exports.total
            }
        }

    def check_same_name(self, path, name):
        """Check whether ``<name>.zip`` already exists in *path*.

        Creates *path* first when it is missing (so a later export can write
        into it). Returns True when a file with that exact name is present.
        """
        if not os.path.exists(path):    # create the directory if absent
            writelog(os.path.exists(path))
            os.makedirs(path)

        # Direct membership test replaces the original manual scan over
        # os.listdir().
        return name + ".zip" in os.listdir(path)

    def export_shp(self, path, user_id, sub_set_id, layer_codes, file_name):
        """Record an export request and kick off the shapefile export.

        Args:
            path:        target directory (expected to end with a separator,
                         since file_path is built by plain concatenation —
                         TODO confirm against callers).
            user_id:     id of the requesting user.
            sub_set_id:  sub-set to export.
            layer_codes: geo layers to include in the export.
            file_name:   base name of the produced zip, without extension.
        """
        try:
            export_record = ExportRecord(
                user_id=user_id,
                sub_set_id=sub_set_id,
                geo_layers=layer_codes,
                file_path=path + file_name + '.zip'
            )
            self.db.session.add(export_record)
            self.db.session.commit()

            controller = ExportController()
            controller.export_shp(sub_set_id, layer_codes, path, file_name, export_record)
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate; the failure remains best-effort logged, but the
            # session is now cleaned up instead of left with a pending insert.
            self.db.session.rollback()
            writelog(traceback.format_exc())