# -*- coding: utf-8 -*-

import requests
import os
import re
import json
import time
import hashlib
from datetime import datetime

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler
import re

import mongo_util
from craw_model import Basics, Containers, Customs, Sails, Statuses, Wharf, WharfTransports, WharfVoy
from smlines_crawer import GoodTracking

from crawer_detail import post_code, save_pic
from predict import predict

from spiders.crawer_shipment import get_shipment_info


# Xiamen Container Terminal (厦门集装箱码头)


def parse_jxCom(craw_id):
    """Query the Xiamen Container Terminal (www.jx-c.com.cn) for a bill-of-lading number.

    Args:
        craw_id: bill-of-lading number, e.g. "XMN0A2793200".

    Returns:
        dict parsed from the terminal's JSON response; when a container id and
        a data source are present in that response, the fee list from a second
        request is merged in under the 'obsDwFees' key.
    """
    import base64
    import jsonpath  # third-party; kept function-local as in the original

    headers = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Host': 'www.jx-c.com.cn',
        'Referer': 'https://www.jx-c.com.cn/weixin/wxWharfGroupSuitcase.html',
        # BUG in original: header name had a trailing space ('User-Agent '),
        # which sends a malformed header the server will not match.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36',
    }

    searchC = {
        'propertyName': 'ieBlno',
        'columnName': 'IE_BLNO',
        'dataType': 'S',
        'value': craw_id,
        'operation': 'EQ',
    }
    params = {
        'operationEnvironment': 'app',
        'queryResultType': 'page',
        'searchColumns': json.dumps(searchC),
        'sum': 'false',
    }

    # https://www.jx-c.com.cn/login.html  (Xiamen Container Terminal)
    response = requests.get("https://www.jx-c.com.cn/xctgonline/blnoInquiries/customQuery",
                            headers=headers, params=params, timeout=30)
    res = response.json()

    def _first(path):
        # First jsonpath hit in the parsed response, or "" when absent.
        hit = jsonpath.jsonpath(res, path)
        return hit[0] if hit else ""

    # Container id + data source are needed to fetch the fee list (费用清单).
    ieConid = _first('$..ieConid')
    ieDataSource = _first('$..ieDataSource')

    if ieConid and ieDataSource:
        # The fee endpoint expects base64-encoded query parameter values.
        headers['encodeFlag'] = 'true'

        def _b64(text):
            return str(base64.b64encode(text.encode('utf-8')), 'utf-8')

        search_dict = [
            {"propertyName": "conid", "columnName": "CONID", "dataType": "S",
             "value": ieConid, "operation": "EQ"},
            {"propertyName": "dataSource", "columnName": "DATA_SOURCE", "dataType": "S",
             "value": ieDataSource, "operation": "EQ"},
        ]
        newparams = {
            "sortDirection": _b64("undefined"),
            "sortName": _b64("undefined"),
            'sum': _b64("false"),
            'searchColumns': _b64(json.dumps(search_dict)),
        }

        response = requests.get("https://www.jx-c.com.cn/xctgonline/obsDwFee/myQuery",
                                headers=headers, params=newparams, timeout=30)
        res1 = response.json()
        if 'obsDwFees' in res1:
            res['obsDwFees'] = res1['obsDwFees']
        # obsDwFees fields (per original notes):
        #   portfee       cargo port dues (货物港务费)
        #   addfee        port facility security fee (港口设施安保费)
        #   stackfee      storage/custody fee (堆存保管费)
        #   portContract  port handling lump-sum fee (港口作业包干费)
        #   total         grand total (合计)
    return res


def parse_esvcCom(craw_id):
    """Crack the shipmentlink.com captcha and query cargo tracking for *craw_id*.

    Downloads the captcha image, OCRs it with predict(), then posts the code
    together with the tracking number via post_code().

    Returns:
        whatever post_code() returns (also printed for debugging).
    """
    base_url = 'https://www.shipmentlink.com/servlet/TUF1_CaptchaUtils?d='
    testpath = './datas/'
    # Millisecond timestamp doubles as cache-buster and captcha session key.
    timestamp = str(int(round(time.time() * 1000)))
    imageurl = base_url + timestamp
    save_pic(imageurl)
    imagecode = predict(img_dir=testpath)
    # (removed an unused function-local import of get_shipment_info)
    res = post_code(timestamp, imagecode, craw_id)
    print(res)
    return res


from fastapi import FastAPI

# HTTP API application; the route handlers below register themselves on it.
app = FastAPI()


@app.get('/crawl/{craw_id}')
async def crawl_xiamen(craw_id: str):
    """Crawl the Xiamen terminal for *craw_id* (e.g. XMN0A2793200), persist and return it.

    Renamed from `craw_item`, which was defined four times in this module and
    silently shadowed; the route path callers use is unchanged.
    """
    res = parse_jxCom(craw_id)
    # Original had a stray trailing comma here, building a throwaway tuple.
    save_data(url='xiamen_ganwu', craw_id=craw_id, res=res)
    return {'key_url': 'xiamen_ganwu', 'key_word': craw_id, 'md5': value_md5(craw_id), 'value': res}


@app.get('/crawl/status/{craw_id}')
async def crawl_xiamen_status(craw_id: str):
    """Status probe: re-crawl *craw_id* and report the size of the result dict.

    Renamed from the duplicated `craw_item`; route path unchanged.
    """
    res = parse_jxCom(craw_id)
    return {"res": len(res)}


@app.get('/crawlSmlines/{craw_id}')
async def crawl_smlines_item(craw_id: str):
    """Crawl SM Lines cargo tracking for *craw_id*, persist and return it.

    Renamed from the duplicated `craw_item`; route path unchanged.
    """
    g = GoodTracking()
    data = g.run(craw_id)
    # Original had a stray trailing comma here, building a throwaway tuple.
    save_data(url='smlines', craw_id=craw_id, res=data)
    return {'key_url': 'smlines', 'key_word': craw_id, 'md5': value_md5(craw_id), 'value': data}


@app.get('/crawlSmlines/status/{craw_id}')
async def crawl_smlines_status(craw_id: str):
    """Status probe: a tracking payload longer than 50 chars counts as a hit.

    Renamed from `crawl_smlines`, which collided with the Esvc status handler
    of the same name; route path unchanged.
    """
    g = GoodTracking()
    data = g.run(craw_id)
    return {'res': len(data) > 50}



from spiders import parse_shipment


@app.get('/crawlEsvc/{craw_id}')
async def crawl_esvc(craw_id: str):
    """Crawl shipmentlink (Evergreen e-service) for *craw_id*, persist and return it.

    Renamed from the duplicated `craw_item`; route path unchanged.
    """
    # NOTE(review): `parse_shipment` is imported straight off the `spiders`
    # package — confirm it is a callable and not a submodule.
    res = parse_shipment(craw_id)
    # Original had a stray trailing comma here, building a throwaway tuple.
    save_data(url='crawlEsvc', craw_id=craw_id, res=res)
    return {'key_url': 'crawlEsvc', 'key_word': craw_id, 'md5': value_md5(craw_id), 'value': res}


@app.get('/crawlEsvc/status/{craw_id}')
async def crawl_esvc_status(craw_id: str):
    """Static status endpoint: always reports the source as available.

    Renamed from `crawl_smlines`, which collided with the SM Lines status
    handler of the same name; route path unchanged.
    """
    return {'res': 1}



from spiders.crawl_webquery import parse_webquery


@app.get('/crawlWebquery/{craw_id}')
async def crawlWebquery(craw_id: str):
    """Crawl the webquery source for *craw_id*, persist and return it."""
    res = parse_webquery(craw_id)
    # Original had a stray trailing comma here, building a throwaway tuple.
    save_data(url='crawlWebquery', craw_id=craw_id, res=res)
    return {'key_url': 'crawlWebquery', 'key_word': craw_id, 'md5': value_md5(craw_id), 'value': res}


@app.get('/crawlWebquery/status/{craw_id}')
async def crawlWebqueryStatus(craw_id: str):
    """Static status endpoint: always reports the webquery source as available."""
    status = {'res': 1}
    return status


# =========================  上面是api =============================

# Module-level Mongo connection shared by save_data(); established at import time.
client = mongo_util.init_mongo()
collection = mongo_util.connect(client=client)


def save_data(url, craw_id, res):
    """Persist one crawl result: record the key in data.json, upsert payload in Mongo.

    Args:
        url: logical source key, e.g. 'xiamen_ganwu' or 'smlines'.
        craw_id: tracking / bill-of-lading number used as the lookup key.
        res: parsed crawl payload to store.
    """
    md5 = value_md5(craw_id)  # hash once instead of three times
    # 1. append the key record to data.json (consumed by timer_job)
    write_json(url, craw_id, md5, md5)
    # 2. upsert the payload into Mongo, keyed by "<url>#<craw_id>"
    mongo_util.insert_or_update(collection, result={
        "search_key": url + "#" + craw_id,
        'key_url': url,
        'key_word': craw_id,
        'md5': md5,
        'value': res,
    })


def value_md5(value):
    """Return the hex MD5 digest of *value* (a str, hashed as UTF-8)."""
    digest = hashlib.md5(value.encode("utf8"))
    return digest.hexdigest()


def write_json(key_url, key_word, md5, md5_second):
    """Append one key record to data.json as a single JSON line.

    Writes with ensure_ascii=False so non-ASCII (e.g. Chinese) characters go
    out verbatim.  The original instead round-tripped the dumped string
    through .encode('utf-8').decode('unicode-escape'), which corrupts literal
    backslashes and non-BMP characters; an explicit utf-8 file encoding with
    ensure_ascii=False is the safe equivalent.  Also uses a context manager
    so the handle is closed even on write errors.
    """
    data = {'key_url': key_url, 'key_word': key_word, 'md5': md5, 'md52': md5_second}
    with open('data.json', 'a', encoding='utf-8') as fileObject:
        fileObject.write(json.dumps(data, ensure_ascii=False) + "\n")


def load_json():
    """Read data.json back into a list of dicts (one JSON object per line).

    Returns an empty list when the file holds fewer than 5 lines — a guard
    kept from the original (presumably a warm-up threshold; TODO confirm).

    Raises:
        FileNotFoundError: if data.json does not exist yet.
    """
    m_list = []
    with open("data.json", encoding="utf-8") as f:
        lines = f.readlines()
    if len(lines) >= 5:
        for line in lines:
            m_list.append(json.loads(line.strip('\n')))
    return m_list


# 定时任务
def timer_job():
    """Scheduled job (every minute, see init_scheduler): re-crawl changed keys.

    Reads the key records from data.json and re-crawls entries whose md5 pair
    differs.  The xiamen_ganwu branch is disabled in the original via `and 0`;
    that kill-switch is preserved.
    """
    t = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print('当前时间：{}'.format(t))
    m_dict = load_json()
    # BUG in original: `if str(m_dict) != 0` compared a str to an int and was
    # therefore always true; a plain truthiness check is what was meant.
    if m_dict:
        print(m_dict)
        for m in m_dict:
            if m['key_url'] == 'xiamen_ganwu' and 0:  # branch intentionally disabled
                # BUG in original: the crawl/ignore actions sat on the opposite
                # branches from their own log messages; per the messages, a
                # differing md5 pair means "changed -> re-crawl".
                if m['md5'] != m['md52']:
                    print('当前md5值不一样，开始爬取 -------- ')
                    parse_jxCom(m['key_word'])
                else:
                    print('当前md5值一样，忽略  --------  ')
            elif m['key_url'] == "smlines":
                if m['md5'] != m['md52']:
                    print('当前md5值不一样，开始爬取 -------- ')
                    parse_esvcCom(m['key_word'])
                else:
                    print('当前md5值一样，忽略  --------  ')


@app.on_event('startup')
def init_scheduler():
    """FastAPI startup hook: connect Mongo and start the 1-minute crawl scheduler."""
    # NOTE(review): these locals shadow the module-level client/collection
    # created at import time; the extra connection is kept only to preserve
    # the original startup side effects — confirm it is actually needed.
    client = mongo_util.init_mongo()
    print('mongo2 启动')
    collection = mongo_util.connect(client=client)

    executors = {
        'default': ThreadPoolExecutor(),
    }
    job_defaults = {
        'coalesce': True,
        # BUG in original: the APScheduler job default is 'max_instances'
        # (plural); the misspelled 'max_instance' key was silently ignored.
        'max_instances': 1,
    }

    global scheduler
    scheduler = BackgroundScheduler()
    scheduler.configure(executors=executors, job_defaults=job_defaults)
    # run timer_job every minute
    scheduler.add_job(timer_job, 'interval', minutes=1)
    scheduler.start()


if __name__ == "__main__":
    # 3. start the API server
    import uvicorn

    # Officially recommended launch is the CLI:
    #   uvicorn main:app --host=127.0.0.1 --port=8010 --reload
    # NOTE(review): `debug=True` was dropped from uvicorn.run in newer uvicorn
    # releases — confirm the pinned uvicorn version still accepts it.
    uvicorn.run(app='crawer_website:app', host="127.0.0.1", port=8090, reload=True, debug=True)

    # g = GoodTracking()
    # g.run('XMN0A2020600')
    # data = parse_esvcCom(146900296776)
    # print(data)