'''
@File    :   crawl.py
@Time    :   2021/05/18 23:09:10
@Author  :   小木 
@Contact :   hunt_hak@outlook.com
'''

import re
import os
import time
import requests
from lxml import etree
from predict import predict


# import pandas as pd

from scrapy.selector import Selector
from utils.text_utils import clean_text
from copy import deepcopy
from utils.http_utils import post_http

testpath = './datas/'
# model_path = './model_own_final.pth'


def get_cookie():
    """Fetch a fresh JSESSIONID from the shipmentlink cargo-tracking page.

    Issues a plain GET so the server hands out a session cookie, then
    pulls the JSESSIONID value out of the response's cookie jar.
    """
    tracking_url = 'https://www.shipmentlink.com/servlet/TDB1_CargoTracking.do'
    response = requests.get(tracking_url, headers=headers)
    # Convert the CookieJar into a plain dict so we can index by name.
    cookie_dict = requests.utils.dict_from_cookiejar(response.cookies)
    return cookie_dict['JSESSIONID']


# Browser-like headers sent with every request to shipmentlink.
headers = {
    'User-Agent':
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Referer': 'https://www.shipmentlink.com/servlet/TDB1_CargoTracking.do',
}

# NOTE(review): this performs a live network request at import time —
# consider moving it behind a function or the __main__ guard.
cookie_id = get_cookie()

# Default cookie dict reused by save_pic() and post_code().
cookies = {'TDB1_Function_Type': 'quick', 'JSESSIONID': str(cookie_id)}

# def get_html(url):
#     html = requests.get(url, headers=headers_2)
#     html.encoding = 'utf-8'
#     text = html.text
#     return text


def save_pic(url, basepath=testpath, request_cookie=cookies, filename='2.jpg'):
    """Download the captcha image at *url* and write it under *basepath*.

    Args:
        url: Full URL of the image (captcha) to download.
        basepath: Directory the image file is written into.
        request_cookie: Cookie dict sent with the request (session id).
        filename: Output file name; defaults to the previously
            hard-coded '2.jpg' for backward compatibility.
    """
    photo = requests.get(url, headers=headers, cookies=request_cookie)
    target = os.path.join(basepath, filename)
    with open(target, 'wb') as f:
        f.write(photo.content)



def parse_Transship_Information_table(trs, fisrt_key):
    """Parse the 'Transship Information' table rows.

    The second row supplies the column headers and the third row the
    matching values; they are zipped into a flat dict under *fisrt_key*.
    Returns an empty dict when the table has fewer than three rows.
    """
    result = {}
    if len(trs) < 3:
        return result
    header_cells = trs[1].xpath('./td/text()').extract()
    value_cells = trs[2].xpath('./td/text()').extract()
    result[fisrt_key] = {
        clean_text(header): clean_text(cell)
        for header, cell in zip(header_cells, value_cells)
    }
    return result

def parse_Advance_Filing_Status_table(trs, fisrt_key):
    """Parse the 'Advance Filing Status' table rows.

    Row index 1 holds the column headers and row index 2 the values;
    header/value pairs are stored in a dict under *fisrt_key*. An empty
    dict is returned when the table has fewer than three rows.
    """
    result = {}
    if len(trs) < 3:
        return result
    entry = {}
    result[fisrt_key] = entry
    headers_row = trs[1].xpath('./td/text()').extract()
    values_row = trs[2].xpath('./td/text()').extract()
    for header, cell in zip(headers_row, values_row):
        entry[clean_text(header)] = clean_text(cell)
    return result

def parse_Release_Status_table(trs, fisrt_key):
    """Parse the 'Release Status' table (expects exactly 7 rows).

    Rows 1/2 and 3/4 are header/value pairs whose first header cell
    names a sub-group; rows 5/6 carry no group header and are merged
    into the second sub-group. Returns {} when the row count differs.
    """
    result = {}
    if len(trs) != 7:
        return result
    parsed = {}
    result[fisrt_key] = parsed

    # First sub-group: header row trs[1], value row trs[2].
    keys = trs[1].xpath("./td/text()").extract()
    values = trs[2].xpath('./td/text()').extract()
    group = {}
    parsed[clean_text(keys[0])] = group
    for header, cell in zip(keys[1:], values):
        group[clean_text(header)] = clean_text(cell)

    # Second sub-group: header row trs[3], value row trs[4].
    keys = trs[3].xpath("./td/text()").extract()
    values = trs[4].xpath('./td/text()').extract()
    group = {}
    parsed[clean_text(keys[0])] = group
    for header, cell in zip(keys[1:], values):
        group[clean_text(header)] = clean_text(cell)

    # Rows 5/6 are folded into the second sub-group (the original code
    # reused trs[3]'s first cell as the group key here).
    # NOTE(review): confirm this merge is intended rather than a
    # separate sub-group being silently collapsed.
    last_keys = trs[5].xpath("./td/text()").extract()
    last_values = trs[6].xpath('./td/text()').extract()
    for header, cell in zip(last_keys, last_values):
        group[clean_text(header)] = clean_text(cell)

    return result

def parse_Contact_Office_table(trs, fisrt_key):
    """Parse the 'Contact Office' table (expects exactly 3 rows).

    The first header cell names a sub-group; the remaining header cells
    pair with the value row's cells inside that group. Returns {} when
    the row count differs from three.
    """
    result = {}
    if len(trs) != 3:
        return result
    header_cells = trs[1].xpath("./td/text()").extract()
    value_cells = trs[2].xpath('./td/text()').extract()
    group_name = clean_text(header_cells[0])
    group = {
        clean_text(header): clean_text(cell)
        for header, cell in zip(header_cells[1:], value_cells)
    }
    result[fisrt_key] = {group_name: group}
    return result

#parse_Reference_Information_table

def parse_Reference_Information_table(trs, fisrt_key):
    """Parse the 'Pick-up Reference Information' table (expects 3 rows).

    The first header cell names a sub-group; remaining header cells are
    paired with the value row's cells inside that group. Returns {} when
    the row count differs from three.
    """
    result = {}
    if len(trs) != 3:
        return result
    result[fisrt_key] = {}
    keys = trs[1].xpath("./td/text()").extract()
    values = trs[2].xpath('./td/text()').extract()
    result[fisrt_key][clean_text(keys[0])] = {}
    for key, value in zip(keys[1:], values):
        result[fisrt_key][clean_text(keys[0])][clean_text(key)] = clean_text(value)
    return result


# Backward-compatible alias: the original definition had an accidentally
# doubled 'parse_' prefix, while the (commented-out) caller in
# parse_tables refers to the corrected name above.
parse_parse_Reference_Information_table = parse_Reference_Information_table

def paser_Container_Activity_Information_table(table, fisrt_key, sel, headers, cookies):
    """Parse the container-status table and fetch each container's move history.

    Args:
        table: Selector node of the containers table.
        fisrt_key: Key the container list is stored under in the result.
        sel: Selector of the full page (needed for the hidden form inputs).
        headers: HTTP headers forwarded to the per-container POST.
        cookies: Cookie dict forwarded to the per-container POST.

    Returns:
        {fisrt_key: [container_dict, ...]} where each container dict also
        gets a 'Container Moves' list of movement records; an empty dict
        when the table is too short to parse.
    """
    result = {}
    trs = table.xpath('./tr')
    if len(trs) < 3:
        return result
    result[fisrt_key] = []

    # Row index 1 holds the column headers; strip a trailing '.' so keys
    # are uniform (e.g. 'Container No.' -> 'Container No').
    key_tds = trs[1].xpath('./td').xpath("string(.)").extract()
    key_tds = [key.strip() for key in key_tds]
    key_tds = [key[:-1] if key.endswith(".") else key for key in key_tds]

    for tr_value in trs[2:]:
        td_value_nodes = tr_value.xpath("./td")
        td_values = []
        # The first cell carries the container number inside a JS call:
        # frmCntrMoveDetail('XXXX'); fall back to '' when it is absent.
        try:
            td_values.append(
                re.findall(r"frmCntrMoveDetail\('(.*?)'\)",
                           td_value_nodes[0].extract(), re.S | re.M)[0])
        except IndexError:
            td_values.append("")
        td_values += [td.xpath("string(.)").extract()[0] for td in td_value_nodes[1:]]
        td_values = [value.strip() for value in td_values]
        item_dict = {clean_text(k): clean_text(v) for k, v in zip(key_tds, td_values)}
        result[fisrt_key].append(item_dict)

    # Collect the hidden inputs of the frmCntrMove form; they are replayed
    # (plus 'cntr_no') to request each container's movement detail page.
    input_hidden_nodes = sel.xpath(".//form[@name='frmCntrMove']/input[@type='hidden']")
    post_data_dict = {}
    for node in input_hidden_nodes:
        try:
            node_key = node.xpath("./@name").extract()[0]
            node_value = node.xpath("./@value").extract()[0]
            post_data_dict[node_key] = node_value
        except IndexError:
            # Input without a name or value attribute — skip it.
            pass
    post_url = 'https://www.shipmentlink.com/servlet/TDB1_CargoTracking.do'

    # BUG FIX: the original indexed result['Containers'] here, which
    # raises KeyError whenever fisrt_key != 'Containers'.
    for container in result[fisrt_key]:
        contr_no = container['Container No']
        new_post_data = deepcopy(post_data_dict)
        new_post_data['cntr_no'] = contr_no
        html_text = post_http(post_url, new_post_data, cookies, headers)
        sub_sel = Selector(text=html_text)

        table_nodes = sub_sel.xpath('//table')
        assert len(table_nodes) == 2
        table_trs = table_nodes[1].xpath('./tr')
        assert len(table_trs) >= 2

        # Row 0 is decoration, row 1 holds the headers, the rest are values.
        keys = table_trs[1].xpath('./td/text()').extract()
        keys = [clean_text(key) for key in keys]
        container_move_info = []
        for value_node in table_trs[2:]:
            values = value_node.xpath('./td')
            values = [value.xpath('string(.)').extract()[0] for value in values]
            values = [clean_text(value) for value in values]
            assert len(values) == len(keys)
            container_move_info.append(dict(zip(keys, values)))
        container['Container Moves'] = container_move_info

    return result


def parse_tables(sel, headers=None, cookies=None):
    """Dispatch each <table> on the page to its dedicated parser.

    The HTML of a table's first row identifies which section it belongs
    to; unrecognized tables contribute nothing to the result.

    Args:
        sel: Selector of the whole page.
        headers: Forwarded to the container parser (it issues follow-up
            HTTP requests per container).
        cookies: Likewise forwarded to the container parser.

    Returns:
        Merged dict of all recognized tables' parsed contents.
    """
    result = {}
    total_table = sel.xpath("//table")
    for table in total_table:
        trs = table.xpath('./tr')
        if not trs:
            # Guard: the original indexed trs[0] unconditionally and
            # crashed (IndexError) on tables without rows.
            continue
        first_tr_html = trs[0].extract()
        if "Advance Filing Status" in first_tr_html:
            temp_result = parse_Advance_Filing_Status_table(trs, "Advance Filing Status")
        elif 'Transship Information' in first_tr_html:
            temp_result = parse_Transship_Information_table(trs, "Transship Information")
        elif 'Container(s) information on B/L and Current Status' in first_tr_html:
            temp_result = paser_Container_Activity_Information_table(table, "Containers", sel, headers, cookies)
        elif 'Release Status' in first_tr_html:
            temp_result = parse_Release_Status_table(trs, "ReleaseStatus")
        elif 'Contact Office' in first_tr_html:
            temp_result = parse_Contact_Office_table(trs, "Contact Office")
        # elif 'Pick-up Reference Information' in first_tr_html:
        #     temp_result = parse_Reference_Information_table(trs, "Reference Information")
        else:
            temp_result = {}
        result.update(temp_result)
    return result

def _decode_td_cells(raw_cells, strip_tags=False, drop_lf_entity=False):
    """Decode raw <td> HTML fragments into plain text.

    Undoes the site's hex HTML entities and trims whitespace; a cell that
    fails to decode becomes ''.

    Args:
        raw_cells: List of raw HTML snippets captured from <td> elements.
        strip_tags: Also pull the inner text out of a nested tag pair.
        drop_lf_entity: Also remove the truncated '&#xa' line-feed entity.
    """
    cleaned = []
    for cell in raw_cells:
        try:
            cell = (cell.replace('&nbsp;', '')
                        .replace('&#x28;', '(')
                        .replace('&#x29;', ')')
                        .replace('&#x2f;', '/')
                        .replace('&#x27;', '\''))
            if drop_lf_entity:
                cell = cell.replace('&#xa', '')
            cell = cell.strip()
            if strip_tags and '<' in cell:
                cell = re.findall('<.*?>(.*?)<.*?>', cell)[0].strip()
        except Exception:
            cell = ''
        cleaned.append(cell)
    return cleaned


def post_code(timestamp, captcha_input, BL='146900296776', request_cookie=cookies):
    """Submit the captcha answer + B/L number and scrape the tracking tables.

    Args:
        timestamp: The millisecond timestamp used when fetching the captcha
            image (sent back as hd_captcha_seq).
        captcha_input: The decoded captcha text.
        BL: Bill-of-lading number to track.
        request_cookie: Cookie dict carrying the session id.

    Returns:
        A flat list: [bl_num, vessel_num, *decoded td texts from the five
        responses, container_result dict].
    """
    BL = str(BL)
    url = 'https://www.shipmentlink.com/servlet/TDB1_CargoTracking.do'

    # Payload mirroring the site's quick-tracking search form; the repeated
    # empty 'NO' fields are exactly what the browser submits.
    data = [
        ('BL', BL),
        ('CNTR', ''),
        ('bkno', ''),
        ('TYPE', 'BL'),
        ('NO', ''),
        ('NO', ''),
        ('NO', ''),
        ('NO', ''),
        ('NO', ''),
        ('NO', BL),
        ('SEL', 's_bl'),
        ('captcha_input', captcha_input),
        ('hd_captcha_seq', timestamp),
    ]

    total_result = {}
    # Step 1: main query — returns the page with all container information.
    resp = requests.post(url, headers=headers, data=data, cookies=request_cookie)
    sel = Selector(text=resp.text)
    container_result = parse_tables(sel)
    total_result.update(container_result)

    # Raises IndexError when the link is missing (e.g. rejected captcha),
    # so the caller sees a hard failure instead of silently bad data.
    firstCtnNo = re.findall('firstCtnNo=(.*?)&pod', resp.text, re.S | re.M)[0]
    print('firstCtnNo =', firstCtnNo)

    # Step 2: fetch the 'Advance Filing Status' table.
    data1 = [
        ('TYPE', 'GetDispInfo'),
        ('Item', 'AMSACK'),
        ('BL', BL),
        ('firstCtnNo', firstCtnNo),  # Container No
        ('pod', 'USLAX'),
    ]
    resp1 = requests.post(url, headers=headers, data=data1, cookies=request_cookie)
    sel = Selector(text=resp1.text)
    temp_result = parse_tables(sel, headers=headers, cookies=request_cookie)
    if temp_result:
        total_result.update(temp_result)
    infos1 = re.findall('<td.*?>(.*?)</td>', resp1.text, re.S | re.M)

    # Step 3: fetch the 'Release Status' table.
    data2 = [
        ('TYPE', 'GetDispInfo'),
        ('Item', 'RlsStatus'),
        ('BL', BL),
    ]
    resp2 = requests.post(url, headers=headers, data=data2, cookies=request_cookie)
    sel = Selector(text=resp2.text)
    temp_result = parse_tables(sel)
    if temp_result:
        total_result.update(temp_result)
    infos2 = re.findall('<td.*?class="f12rown2".*?>(.*?)</td>', resp2.text, re.S | re.M)

    # Step 4: fetch the 'Contact Office' table.
    data3 = [
        ('TYPE', 'GetDispInfo'),
        ('Item', 'ContactOffice'),
        ('BL', BL),
    ]
    resp3 = requests.post(url, headers=headers, data=data3, cookies=request_cookie)
    sel = Selector(text=resp3.text)
    temp_result = parse_tables(sel)
    if temp_result:
        total_result.update(temp_result)
    infos3 = re.findall('<td.*?class="f12rown1".*?>(.*?)</td>', resp3.text, re.S | re.M)

    # Step 5: fetch the 'Pick-up Reference Information' table.
    data4 = [
        ('TYPE', 'GetDispInfo'),
        ('Item', 'PickupRef'),
        ('BL', BL),
        ('firstCtnNo', firstCtnNo),
    ]
    resp4 = requests.post(url, headers=headers, data=data4, cookies=request_cookie)
    # BUG FIX: the original re-parsed resp3.text here, so the Pick-up
    # Reference response was never actually parsed.
    sel = Selector(text=resp4.text)
    temp_result = parse_tables(sel)
    if temp_result:
        total_result.update(temp_result)
    infos4 = re.findall('<td.*?class="f12rown1".*?>(.*?)</td>', resp4.text, re.S | re.M)

    try:
        bl_num = re.findall(r'.*?B/L No\.</td>.*?<td.*?>(.*?)</td>', resp.text,
                            re.S | re.M)[0]
    except IndexError:
        bl_num = ''
    try:
        vessel_num = re.findall(r'.*?Vessel Voyage on B/L</td>.*?<td.*?>(.*?)</td>',
                                resp.text, re.S | re.M)[0].strip()
    except IndexError:
        vessel_num = ''

    infos = re.findall('<td.*?class="f12rown1".*?>(.*?)</td>', resp.text,
                       re.S | re.M)
    if infos == []:
        print('Can\'t get details!!')

    # Decode all captured cells; the main page's cells may still contain
    # markup, so only those get tag stripping.
    info_process = _decode_td_cells(infos, strip_tags=True)
    info1_process = _decode_td_cells(infos1, drop_lf_entity=True)
    info2_process = _decode_td_cells(infos2, drop_lf_entity=True)
    info3_process = _decode_td_cells(infos3, drop_lf_entity=True)
    info4_process = _decode_td_cells(infos4, drop_lf_entity=True)

    # NOTE(review): total_result is assembled but never returned; callers
    # currently consume the flat list below — consider returning the dict.
    return [bl_num, vessel_num, *info_process, *info1_process,
            *info2_process, *info3_process, *info4_process, container_result]


if __name__ == "__main__":
    # Manual end-to-end driver, currently disabled. The commented code
    # below documents the intended call sequence: fetch the captcha image,
    # OCR it via predict(), then submit the tracking query with post_code().
    pass
    # BL = '146900296776'
    # base_url = 'https://www.shipmentlink.com/servlet/TUF1_CaptchaUtils?d='
    #
    # timestamp = str(int(round(time.time() * 1000)))
    # imageurl = base_url + timestamp
    #
    # save_pic(imageurl)
    # print(imageurl)
    # print(timestamp)
    #
    # imagecode = predict(img_dir=testpath)
    # print(imagecode)
    #
    # timestamp = 1621603222845
    # imagecode = 'PKUT'
    #
    # cookie_id = get_cookie()
    #
    # cookies = {'TDB1_Function_Type': 'quick', 'JSESSIONID': str(cookie_id)}
    #
    # res = post_code(timestamp, imagecode, BL)
    # with open(f'html_{BL}.html', 'w', encoding='utf-8') as f:
    #     f.write(res)

    # bl_num = re.findall('.*?B/L No\.</td>.*?<td.*?>(.*?)</td>', res,
    #                     re.S | re.M)[0]
    # print(bl_num)
    # vessel_num = re.findall('.*?Vessel Voyage on B/L</td>.*?<td.*?>(.*?)</td>',
    #                         res, re.S | re.M)[0].strip()
    # print(vessel_num)
    # infos = re.findall('<td.*?class="f12rown1".*?>(.*?)</td>', res,
    #                    re.S | re.M)
    # print(infos)

    # infos = ['\r\n\t\t\t\t                                <input type="text" name="NO" maxlength="12" style="width:220px;margin-bottom:5px;" value="146900296776" onkeypress="IsEngorInt(event)" onkeyup="this.value = this.value.toUpperCase();">&nbsp;&nbsp;\r\n\t\t\t\t                            \t\r\n\t\t\t\t                            \t<div id="captcha_div_s"></div><br>\r\n\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t\t\t\t\t<input type="button" align="absmiddle" value="Submit" onClick="javascript:frmSubmit(13,2);" />\r\n\t\t\t\t                            ', '&nbsp;XIAMEN, CHINA &#x28;CN&#x29;', '&nbsp;    1', '&nbsp;XIAMEN, CHINA &#x28;CN&#x29;', '&nbsp;8,367.200 KGS', '&nbsp;LOS ANGELES, CA &#x28;US&#x29;', '&nbsp;          63.5120 CBM', '&nbsp;LOS ANGELES, CA &#x28;US&#x29;', '&nbsp;       664 CARTONS', '&nbsp; ', '&nbsp;MAY-06-2019', '&nbsp;PORT&#x2f;PORT', '&nbsp;', '&nbsp;&nbsp;', '&nbsp;', '&nbsp;&nbsp;', '&nbsp;', '\r\n\t\t\t\t\t<a href="javascript:frmCntrMoveDetail(\'TCNU6617530\');" onMouseOver="window.status=\'Container:TCNU6617530 onboard:20190506 pol:CNXSM\';return true;" onMouseOut="window.status=\'\'">TCNU6617530</a>\r\n\t\t\t\t', '\r\n                    <span ecType="ToolTip" msg="40&#x20;FEET&#x20;HI-CUBE&#x20;STEEL&#x20;DRY&#x20;CARGO&#x20;CONTAINER" underline>40&#x27;&#x28;SH&#x29;</span>\t\r\n\t\t\t\t', 'EMCRXU3688', 'FCL&#x2f;FCL', '       664 CARTONS', '', '', 'Empty container returned', 'MAY-30-2019']
    # print(len(infos))
    # for info in infos:
    #     info = info.replace('&nbsp;', '').replace('&#x28;', '(').replace('&#x29;', ')').replace('&#x2f;', '/').replace('&#x27;', '\'').strip()
    #     if '<' in info:
    #         info = re.findall('<.*?>(.*?)<.*?>', info)[0].strip()
    #     print(info)