from mydefine import *
from sqltb import *
import ipfshttpclient
import json
import multiprocessing
from model_parser import pcd2plist,mesh2pcd
from concurrent.futures import ThreadPoolExecutor, wait
import yaml

def import_model(head_recved, db, feature_net):
    """Extract a feature vector for each uploaded model and store all rows in the DB.

    Args:
        head_recved: request head dict; uses keys 'block_size', 'titles',
            'values', 'category' (one entry per model in block_size/values --
            schema assumed from usage, confirm against the sender).
        db: database handle forwarded to TBModel.
        feature_net: network exposing .feature(points) -> feature tensor.

    Returns:
        Response head dict with 'type' (RESPONSE_UPLOADMODEL or RESPONSE_ERROR)
        and 'status' ('' on success, error text otherwise).
    """
    block_size = head_recved['block_size']
    titles = head_recved['titles']
    values = head_recved['values']
    models = []
    category = getModelCategory(head_recved['category'])
    # BUG FIX: cpu_count() - 2 can be 0 or negative on small machines, and
    # ThreadPoolExecutor raises ValueError for max_workers <= 0.
    workers = max(1, multiprocessing.cpu_count() - 2)
    with ThreadPoolExecutor(max_workers=workers) as tp:
        def mesh_get_feature(i):
            # Convert mesh -> point cloud, then extract its feature vector.
            modelFilePath = values[i][titles[5]]
            print(modelFilePath)
            try:
                points_list = pcd2plist(mesh2pcd(modelFilePath))
                points = torch.tensor(list(points_list), dtype=torch.float).view(1, -1, 3)
                # Feature extraction (runs on CPU here).
                feature = feature_net.feature(points).squeeze().cpu() # TODO: CUDA
            except Exception as e:
                logger.warning(f'Failed to extract features of "{modelFilePath}", info: {str(e)}')
                # Placeholder feature so the row is still inserted.
                feature = torch.ones(128).cpu()
            feature = tensor2str(feature)
            # Assemble the table row. NOTE: list.append is atomic in CPython,
            # so concurrent appends are safe; row ORDER is nondeterministic.
            val = values[i]
            vname = val[titles[0]]
            dscrp = vname.split('_')[0]
            models.append(TBModel.TableRow( category=category,
                                            name=vname,
                                            type=val[titles[1]],
                                            tags=val[titles[2]],
                                            description=dscrp,
                                            feature=feature,
                                            data='',
                                            hash=modelFilePath,
                                            extend=val[titles[4]],
                                            trans_model=json.dumps(val[titles[6]]),
                                            trans_camera=json.dumps(val[titles[7]])
                                            ))
        futures = [tp.submit(mesh_get_feature, i) for i, _ in enumerate(block_size)]
        wait(futures)

    # Read the IPFS endpoint from the local config file.
    with open('config.yml', 'r') as conf_file:
        ymlConf = yaml.load(conf_file, Loader= yaml.FullLoader)
        ipfs_url = ymlConf['ipfs']['url']
    # Connect to the database wrapper.
    tb_model = TBModel(database=db, feature_net=feature_net, ipfs_url=ipfs_url)
    # Upload the assembled rows.
    status = tb_model.upload_models(models)
    if(None != status):
        head_tosend = {"type": RESPONSE_ERROR, "status":status}
    else:
        head_tosend = {"type": RESPONSE_UPLOADMODEL, "status":''}
        
        logging.info("几何形状信息分析成功!")
        logging.info("纹理信息分析成功!")
        logging.info("几何简化编码成功!")
        logging.info("材质和局部光照明模型编码成功!")

    return head_tosend

def import_model_simple(head_recved, db, feature_net):
    """Store uploaded model rows with a placeholder feature (no extraction).

    Unlike import_model, this variant skips mesh-to-pointcloud feature
    extraction and writes a constant all-ones feature vector per row.

    Args:
        head_recved: request head dict; uses 'block_size', 'assetType',
            'titles', 'values', 'category' (schema assumed from usage).
        db: database handle forwarded to TBModel.
        feature_net: forwarded to TBModel (not used for extraction here).

    Returns:
        Response head dict with 'type' and 'status' ('' on success).
    """
    block_size = head_recved['block_size']
    assetType = head_recved['assetType']
    models = []
    if(0 == len(block_size)):
        head_tosend = {"type": RESPONSE_ERROR, "status":"没有数据！"}
    else: # build rows with placeholder features
        titles = head_recved['titles']
        values = head_recved['values']
        category = getModelCategory(head_recved['category'])
        # BUG FIX: cpu_count() - 2 can be 0 or negative on small machines,
        # which makes ThreadPoolExecutor raise ValueError.
        workers = max(1, multiprocessing.cpu_count() - 2)
        with ThreadPoolExecutor(max_workers=workers) as tp:
            def mesh_get_feature(i):
                # Constant placeholder feature instead of a real extraction.
                feature = torch.ones(128).cpu()
                feature = tensor2str(feature)
                # Assemble the table row (list.append is atomic in CPython;
                # row order across threads is nondeterministic).
                val = values[i]
                vname = val[titles[0]]
                dscrp = vname.split('_')[0]
                models.append(TBModel.TableRow( category=category,
                                                name=val[titles[0]],
                                                type=val[titles[1]],
                                                tags=val[titles[2]],
                                                description=dscrp,
                                                feature=feature,
                                                data='',
                                                hash=val[titles[5]],
                                                extend=val[titles[4]],
                                                trans_model=json.dumps(val[titles[6]]),
                                                trans_camera=json.dumps(val[titles[7]])
                                                ))
            futures = [tp.submit(mesh_get_feature, i) for i, _ in enumerate(block_size)]
            wait(futures)

        # Read the IPFS endpoint from the local config file.
        with open('config.yml', 'r') as conf_file:
            ymlConf = yaml.load(conf_file, Loader= yaml.FullLoader)
            ipfs_url = ymlConf['ipfs']['url']
        # Connect to the database wrapper.
        tb_model = TBModel(database=db, feature_net=feature_net, ipfs_url=ipfs_url)
        # Upload the assembled rows.
        status = tb_model.upload_models(models)
        # Build the response head.
        if(None != status):
            head_tosend = {"type": RESPONSE_ERROR, "status":status}
        else:
            head_tosend = {"type": RESPONSE_UPLOADMODEL_SIMPLE, "status":''}

        logging.info("几何形状信息分析成功!")
        logging.info("纹理信息分析成功!")
        logging.info("几何简化编码成功!")
        logging.info("材质和局部光照明模型编码成功!")

    return head_tosend

def import_bvh(head_recved, db):
    """Add BVH (motion-capture) assets to tb_bvh, storing files on IPFS.

    Args:
        head_recved: request head dict; uses 'block_size', 'titles',
            'values', 'category' (schema assumed from usage).
        db: database wrapper exposing sqlexe(sql[, params]) -> (status, rows),
            where status is None on success.

    Returns:
        Response head dict with 'type' and 'status'.
    """
    block_size = head_recved['block_size']
    if(0 == len(block_size)):
        head_tosend = {"type": RESPONSE_ERROR, "status":"没有数据！"}
    else:
        titles = head_recved['titles']
        values = head_recved['values']
        category = getModelCategory(head_recved['category'])

        # Refresh the cached row count for this asset type.
        sql_updateCount = "UPDATE tb_info SET count1 = (SELECT COUNT(*) FROM tb_bvh) where assetType==%s" % (ASSET_BVH)
        db.sqlexe(sql_updateCount)

        status, id_count = db.sqlexe("SELECT count1 FROM tb_info WHERE assetType==%s" % (ASSET_BVH))
        # BUG FIX: check the query status BEFORE dereferencing its result --
        # previously a failed query crashed on id_count instead of returning
        # the error head.
        if status != None:
            return {"type": RESPONSE_ERROR, "status":status}
        # Next free id = current row count + 1.
        id_count = list(id_count[0])[0] + 1

        insert_bvhs=[]

        # Read the IPFS endpoint from the local config file.
        with open('config.yml', 'r') as conf_file:
            ymlConf = yaml.load(conf_file, Loader= yaml.FullLoader)
            ipfs_url = ymlConf['ipfs']['url']
        try:
            client = ipfshttpclient.connect(ipfs_url)
        except Exception as e:
            logger.error(e)
            return {"type": RESPONSE_ERROR, "status":str(e)}
        try:
            for val in values:
                file_ipfs=val[titles[5]]        # local file path to upload
                # Store the file on IPFS; keep only its content hash.
                ipfs_hash = client.add(file_ipfs)['Hash']
                insert_bvhs.append([
                    id_count,                   # id
                    category,                   #
                    val[titles[0]],             # name
                    val[titles[1]],             # type
                    val[titles[2]],             # tags
                    "",                         # description
                    ipfs_hash,                  # hash
                    val[titles[4]],             # extend
                    json.dumps(val[titles[6]]), # trans_model
                    json.dumps(val[titles[7]]), # trans_camera
                ])
                id_count += 1
        finally:
            # BUG FIX: the IPFS connection was never released (resource leak).
            client.close()

        sql_insert2bvh = "INSERT INTO tb_bvh(id, category, name, type, tags, description, hash, extend, trans_model, trans_camera) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"
        status, results = db.sqlexe(sql_insert2bvh, insert_bvhs)

        if status != None:
            return {"type": RESPONSE_ERROR, "status":status}

        # Re-sync the cached row count after the insert.
        status, res = db.sqlexe(sql_updateCount)

        if status != None:
            return {"type": RESPONSE_ERROR, "status":status}

        head_tosend = {"type": RESPONSE_UPLOADMODEL, "status":status}

        logging.info("几何特征信息分析成功!")

    return head_tosend


def import_effect(head_recved, db):
    """Add effect assets to tb_effect, storing files on IPFS.

    Args:
        head_recved: request head dict; uses 'block_size', 'titles',
            'values', 'category' (schema assumed from usage).
        db: database wrapper exposing sqlexe(sql[, params]) -> (status, rows),
            where status is None on success.

    Returns:
        Response head dict with 'type' and 'status'.
    """
    block_size = head_recved['block_size']
    if(0 == len(block_size)):
        head_tosend = {"type": RESPONSE_ERROR, "status":"没有数据！"}
    else:
        titles = head_recved['titles']
        values = head_recved['values']
        category = getModelCategory(head_recved['category'])

        # Refresh the cached row count for this asset type.
        sql_updateCount = "UPDATE tb_info SET count1 = (SELECT COUNT(*) FROM tb_effect) where assetType==%s" % (ASSET_EFFECT)
        db.sqlexe(sql_updateCount)

        status, id_count = db.sqlexe("SELECT count1 FROM tb_info WHERE assetType==%s" % (ASSET_EFFECT))
        # BUG FIX: check status BEFORE dereferencing the result (a failed
        # query crashed here), and return a response-head dict instead of a
        # bare status, consistent with import_bvh.
        if status != None:
            return {"type": RESPONSE_ERROR, "status":status}
        # Next free id = current row count + 1.
        id_count = list(id_count[0])[0] + 1

        insert_effects=[]

        # Read the IPFS endpoint from the local config file.
        with open('config.yml', 'r') as conf_file:
            ymlConf = yaml.load(conf_file, Loader= yaml.FullLoader)
            ipfs_url = ymlConf['ipfs']['url']
        try:
            client = ipfshttpclient.connect(ipfs_url)
        except Exception as e:
            logger.error(e)
            return {"type": RESPONSE_ERROR, "status":str(e)}
        try:
            for val in values:
                file_ipfs=val[titles[5]]        # local file path to upload
                # Store the file on IPFS; keep only its content hash.
                ipfs_hash = client.add(file_ipfs)['Hash']
                insert_effects.append([
                    id_count,                   # id
                    category,                   #
                    val[titles[0]],             # name
                    val[titles[1]],             # type
                    val[titles[2]],             # tags
                    "",                         # description
                    ipfs_hash,                  # hash
                    val[titles[4]],             # extend
                ])
                id_count += 1
        finally:
            # BUG FIX: the IPFS connection was never released (resource leak).
            client.close()

        sql_insert2effect = "INSERT INTO tb_effect(id, category, name, type, tags, description, hash, extend) VALUES (?, ?, ?, ?, ?, ?, ?, ?);"
        status, results = db.sqlexe(sql_insert2effect, insert_effects)

        # BUG FIX: insert/update failures were silently folded into a
        # RESPONSE_UPLOADMODEL head; report them as errors like import_bvh.
        if status != None:
            return {"type": RESPONSE_ERROR, "status":status}

        status, res = db.sqlexe(sql_updateCount)

        if status != None:
            return {"type": RESPONSE_ERROR, "status":status}

        head_tosend = {"type": RESPONSE_UPLOADMODEL, "status":status}

    return head_tosend