import json
import time
import scrapy
import os
import gzip

from utils.timestamp import create_timestamp
from utils.conf_read import Conf_Read
from utils.actuator import actuator


from pp.common import SU_NING_DB
from pp.items import Pipe_Pk
from pp.db_model import ShopModel, Company
from pp.log import create_log_filename


def get_search_company_url(token, company_name):
  """Build the QCC mini-program advanced-search URL for *company_name*."""
  base = 'https://xcx.qcc.com/mp-weixin/forwardApp/v3/base/advancedSearch'
  return f'{base}?t={time.time()}&searchKey={company_name}&token={token}'

def get_search_company_detail_url(token, id):
  """Build the QCC mini-program company-detail URL for a company unique id."""
  base = 'https://xcx.qcc.com/mp-weixin/forwardApp/v6/base/getEntDetail'
  return f'{base}?t={time.time()}&token={token}&unique={id}'


def get_conf_path(file_name):
  """Return the path of *file_name* relative to this module's directory."""
  return os.path.join(os.path.dirname(__file__), file_name)


def obj_map(keys_map, source):
  """Project *source* onto new key names.

  For each ``target_key -> source_key`` pair in *keys_map*, the result maps
  ``target_key`` to ``source.get(source_key)`` (``None`` when absent).
  """
  return {target: source.get(src) for target, src in keys_map.items()}
  
  
class QCC(scrapy.Spider):
  """Spider that enriches shop records with company registration details
  fetched from the QCC (企查查) WeChat mini-program API.

  Flow: iterate shop ids from the local DB -> search the company by name to
  obtain its QCC unique id -> fetch the company detail record -> yield a
  ``Pipe_Pk`` item consumed by ``CompanySavePipeline``.
  """

  name = 'qcc'

  # Persists crawl progress (current_shop_id) between runs.
  _conf_read = Conf_Read(get_conf_path('company_setting.yaml'))
  _db = SU_NING_DB
  # Mini-program session token; must be refreshed manually when it expires.
  _token = 'e1042cad41b09fe14d60d04f24686067'

  custom_settings = {
    'LOG_FILE': create_log_filename('qcc-company'),
    'LOG_LEVEL': 'INFO',
    # The paging service responds slowly; requesting too fast makes it
    # discard part of the requests with HTTP 403 — hence the delay.
    # BUGFIX: the key previously contained a trailing space
    # ('DOWNLOAD_DELAY '), so Scrapy silently ignored this setting.
    'DOWNLOAD_DELAY': 2,
    'HTTPERROR_ALLOWED_CODES': [403],
    'ITEM_PIPELINES': {
      'pp.pipelines.CompanySavePipeline': 300,
    }
  }


  def start_requests(self):
    """Walk shop ids from the saved checkpoint up to a target id read from
    stdin, yielding one company-search request per matching shop."""
    _common_conf = self._conf_read.conf
    print('当前ID {} / 目标ID'.format(_common_conf['current_shop_id']))
    _target_shop_id = int(input())
    current_id = _common_conf.get('current_shop_id', _target_shop_id)
    # 3525

    session = self._db.DBSession()

    while current_id <= _target_shop_id:

      shop = session.query(ShopModel).filter(
        # skip self-operated shops (shop_type == 1)
        ShopModel.shop_type != 1,
        ShopModel.id == current_id

      ).first()

      # Advance the checkpoint before any network work so an interrupted
      # run resumes from the next id instead of re-processing this one.
      current_id += 1
      _common_conf['current_shop_id'] = current_id

      if shop is None:
        continue

      self._conf_read.updateConf()

      url = get_search_company_url(self._token, shop.company_name)
      print(url)

      yield scrapy.Request(
        url=url,
        callback=self.get_company_id,
        cb_kwargs=dict(shop=shop)
      )

      # Extra blocking throttle on top of DOWNLOAD_DELAY to avoid 403s.
      time.sleep(10)

  def get_company_id(self, response, shop):
    """Parse the search response, map the first hit's base fields, then
    chain a detail request carrying ``shop`` and the mapped dict."""

    json_error, res = actuator(json.loads)(response.text)

    if not json_error:
      self.logger.error('基础信息json解析错误: ')
      self.logger.error('----------------')
      self.logger.error(response.url)
      self.logger.error('----------------')
      self.logger.error(f'{shop.shop_name}/{shop.shop_id}')
      self.logger.error('----------------')
      self.logger.error(str(response.body, 'utf-8'))
      self.logger.error('----------------')
      return

    status_code = res.get('status')
    result = None

    if status_code != 200:
      self.logger.error(f'公司基础信息请求失败: {response.text}')
      return

    try:
      result = res.get('result').get('Result')[0]
    except Exception as e:
      self.logger.error(f'获取公司基础信息解构失败: {response.text}')
      self.logger.error(e)
      # BUGFIX: previously fell through with result=None and crashed inside
      # obj_map; abandon this shop after logging instead.
      return

    # target Company column -> QCC response field
    map_keys = {
      'platform_id': 'KeyNo',
      'name': 'Name',
      'registered_capital': 'RegistCapi',
      'registered_status': 'Status',
      'registered_address': 'Address',
      'register_date': 'StartDate',
      'legal_representative': 'OperName'
    }

    company_dict = obj_map(map_keys, result)

    yield scrapy.Request(
      url = get_search_company_detail_url(
        token = self._token,
        id = company_dict.get('platform_id', '')
      ),
      callback = self.get_company_detail,
      cb_kwargs = dict(shop=shop, company_dict=company_dict)
    )



  def get_company_detail(self, response, shop, company_dict):
    """Parse the detail response, merge it with the base fields collected by
    ``get_company_id`` and emit a Pipe_Pk item for the save pipeline."""

    json_error, res = actuator(json.loads)(response.text)
    if not json_error:
      self.logger.error('详情json解析错误: ')
      self.logger.error('----------------')
      self.logger.error(f'{shop.shop_name}/{shop.shop_id}')
      self.logger.error('----------------')
      self.logger.error(str(response.body, 'utf-8'))
      self.logger.error('----------------')
      return

    status_code = res.get('status')
    result = None

    if status_code != 200:
      self.logger.error(f'获取公司详情请求失败: {response.text}')
      return

    try:
      result = res.get('result').get('Company')
    except Exception as e:
      self.logger.error(f'获取公司详情解构失败: {response.text}')
      self.logger.error(e)
      # BUGFIX: previously fell through with result=None and crashed on
      # result.get() below; abandon this shop after logging instead.
      return

    # target Company column -> QCC response field
    map_keys = {
      'company_type': 'EconKind',
      'contributed_capital': 'RecCap',
      'registered_capital': 'RegistCapi',
      'registration_authority' : 'BelongOrg',
      'business_scope': 'Scope',
      'b_code': 'No',
      't_code': 'TaxNo',
      'u_code': 'CreditCode',
      'o_code': 'OrgNo',
      'approval_date': 'CheckDate'
    }

    # 'TeamEnd' is a typo in the upstream API itself (should be 'TermEnd');
    # we must request the misspelled field to get the value.
    business_term = '{}至{}'.format(result.get('TermStart'), result.get('TeamEnd'))
    industry = result.get('Industry').get('Industry')
    shop_id = shop.shop_id

    company = Company(**{
      'platform_type': '企查查',
      'create_time': create_timestamp(),
      'business_term': business_term,
      'industry': industry,
      'shop_id': shop_id,
      **company_dict,
      # base-search fields take precedence where keys overlap
      # (e.g. registered_capital appears in both maps)
      **obj_map(map_keys, result),
    })

    print('------------------')
    print(dict(company))
    print('------------------')
    self.logger.info(f'SAVE: company: {company.name} - shop_id: {company.shop_id}')

    yield Pipe_Pk(body = {
      'shop': shop,
      'company': company
    })



'''

# URL
https://capi.tianyancha.com/cloud-other-information/search/app/searchCompany

# HEADER POST
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36 MicroMessenger/7.0.9.501 NetType/WIFI MiniProgramEnv/Windows WindowsWechat
content-type: application/json
version: TYC-XCX-WX

# BODY
{"sortType":1,"pageSize":20,"pageNum":1,"word":"深圳市蓝河钻石有限公司","allowModifyQuery":1}

'''



'''

持股人列表
#URL
https://xcx.qcc.com/mp-weixin/forwardApp/v1/boss/bossMatch?token=813cbfc99549eaa165c5fa492567650f&t=1622626857000&searchKey=广州柏特迈电子有限公司&from=companyPage&pageSize=10

# QUERY
token=813cbfc99549eaa165c5fa492567650f
searchKey=广州柏特迈电子有限公司
pageSize=20

t=1622626857000
from=companyPage


企业查询查询
#URL
https://xcx.qcc.com/mp-weixin/forwardApp/v3/base/advancedSearch?token=7a0330b63080175f4dd187eb9ba510ec&searchKey=%E5%A2%9E%E5%9F%8E

# QUERY
token:813cbfc99549eaa165c5fa492567650f
t:1622627518000
insuredCntStart:
hasSC:
hasBM:
hasAOP:
hasTM:
registCapiBegin:
isSortAsc:
hasHN:
searchType:
hasPatent:
hasPhone:
hasEmail:
hasMP:
hasLQ:
hasAPP:
hasTE:
hasENP:
needGroup:
searchKey:cc
currencyCode:
fundingTime:
hasFinance:
hasTA:
pageIndex:1
hasShiXin:
hasC:
hasCI:
searchIndex:10
hasIPO:
pageSize:
hasMobilePhone:
insuredCntEnd:
province:
industryV3:
hasGT:
hasZhuanLi:
hasCR:
hasCert:
statusCode:
hasWP:
registCapiEnd:
coyType:
countyCode:
hasGW:
sortField:
startDateBegin:
startDateEnd:


# 企业详情
# URL
GET /mp-weixin/forwardApp/v6/base/getEntDetail?token=7a0330b63080175f4dd187eb9ba510ec&t=1622685651000&unique=82c9578e3543732e3ac5212cf43f5ed2


/mp-weixin/forwardApp/v1/other/searchProject?token=7a0330b63080175f4dd187eb9ba510ec&t=1622686314000&pageIndex=1&searchKey=%E9%A1%BA%E4%B8%B0

/mp-weixin/forwardApp/v6/base/getEntDetail?token=74c25f567c3781832ba1e6aff9eb5cfa&t=1622700446000&unique=d1ed924c97ae8d7ec010ff8e0d9f9d27
'''


