'''
@File    :   crawl.py
@Time    :   2021/05/18 23:09:10
@Author  :   小木 
@Contact :   hunt_hak@outlook.com
'''

import re
import os
import requests


from scrapy.selector import Selector
from utils.text_utils import clean_text
from copy import deepcopy
from utils.http_utils import post_http,shipment_headers
from project_setting import image_data_path





def save_pic(url, basepath=image_data_path, request_cookie=None, headers=None, filename='2.jpg'):
    """Download the image at *url* (typically the tracking captcha) to disk.

    Parameters
    ----------
    url : str
        Image URL to fetch.
    basepath : str
        Directory the file is written into (defaults to the project's
        ``image_data_path``).
    request_cookie : dict | None
        Cookies forwarded with the request so the captcha image matches
        the current session.
    headers : dict | None
        HTTP headers forwarded with the request.
    filename : str
        Output file name.  Previously hard-coded to ``'2.jpg'``; kept as
        the default for backward compatibility.

    Returns
    -------
    str
        Full path of the file that was written.
    """
    photo = requests.get(url, headers=headers, cookies=request_cookie)
    filepath = os.path.join(basepath, filename)
    with open(filepath, 'wb') as f:
        f.write(photo.content)
    return filepath



def parse_Transship_Information_table(trs, fisrt_key):
    """Parse the 'Transship Information' table.

    Row 0 is the section header, row 1 holds the column labels and row 2
    the matching values.  Returns ``{fisrt_key: {label: value}}`` (both
    sides run through ``clean_text``), or ``{}`` when the table has
    fewer than three rows.
    """
    if len(trs) < 3:
        return {}
    labels = trs[1].xpath("./td/text()").extract()
    cells = trs[2].xpath('./td/text()').extract()
    section = {clean_text(label): clean_text(cell)
               for label, cell in zip(labels, cells)}
    return {fisrt_key: section}

def parse_Advance_Filing_Status_table(trs, fisrt_key):
    """Parse the 'Advance Filing Status' table.

    Expects the section header in row 0, field labels in row 1 and field
    values in row 2.  Returns ``{fisrt_key: {label: value}}`` with both
    labels and values normalised through ``clean_text``; ``{}`` when
    fewer than three rows are present.
    """
    if len(trs) < 3:
        return {}
    result = {fisrt_key: {}}
    label_cells = trs[1].xpath("./td/text()").extract()
    value_cells = trs[2].xpath('./td/text()').extract()
    for label, cell in zip(label_cells, value_cells):
        result[fisrt_key][clean_text(label)] = clean_text(cell)
    return result

def parse_Release_Status_table(trs,fisrt_key):
    """Parse the 'Release Status' table.

    Expects exactly 7 rows: a header row (0), then three label/value row
    pairs (1-2, 3-4, 5-6).  '.' is stripped from every label.  Rows 1-2
    fill a sub-dict keyed by row 1's first label; rows 3-4 then create a
    sub-dict keyed by row 3's first label (re-initialising it if that
    label repeats row 1's); rows 5-6 merge into that row-3 sub-dict.
    Duplicate 'Date' labels are stored as 'Date1', and in the later
    sections an existing entry is not overwritten by "N/A" or empty
    values.  Returns ``{fisrt_key: {...}}``, or ``{}`` when the row
    count differs from 7.
    """
    result = {}
    if len(trs)!=7:
        return result
    result[fisrt_key] = {}
    # --- Section 1: row 1 labels, row 2 values ------------------------
    keys = trs[1].xpath("./td/text()").extract()
    keys = [key.replace(".","") for key in keys]
    values = trs[2].xpath('./td/text()').extract()
    # keys[0] names the whole sub-section; the remaining labels pair
    # positionally with the value cells.
    result[fisrt_key][clean_text(keys[0])] = {}
    for key, value in zip(keys[1:], values):
        if clean_text(key) in result[fisrt_key][clean_text(keys[0])]:
            # Second occurrence of a 'Date' column is kept under 'Date1'
            # instead of clobbering the first.
            if clean_text(key) == 'Date':
                key = "Date1"
        result[fisrt_key][clean_text(keys[0])][clean_text(key)] = clean_text(value)

    # --- Section 2: row 3 labels, row 4 values ------------------------
    keys = trs[3].xpath("./td/text()").extract()
    keys = [key.replace(".", "") for key in keys]
    # string(.) here collapses each full <td> to text, unlike the plain
    # text() used above — row 4 cells apparently contain nested markup.
    values = trs[4].xpath('./td').xpath("string(.)").extract()
    # NOTE(review): if row 3's first label equals row 1's, this replaces
    # the section built above — presumably the labels differ in practice.
    result[fisrt_key][clean_text(keys[0])] = {}
    for key, value in zip(keys[1:], values):
        # Unlike section 1, 'Date' is renamed unconditionally here.
        if clean_text(key) == 'Date':
            key = "Date1"
        if clean_text(key) in result[fisrt_key][clean_text(keys[0])]:
            # Never overwrite an existing entry with "N/A"/empty.
            if clean_text(value) == "N/A" or not clean_text(value):
                continue
        result[fisrt_key][clean_text(keys[0])][clean_text(key)] = clean_text(value)

    # --- Section 3: rows 5-6 merge into the row-3 sub-dict ------------
    last_keys = trs[5].xpath("./td/text()").extract()
    last_keys = [key.replace(".", "") for key in last_keys]
    last_values = trs[6].xpath('./td/text()').extract()
    # Note: keys[0] below still refers to row 3's first label, so these
    # entries land in section 2's sub-dict; also, all of last_keys pair
    # with values here (no [1:] skip as in the sections above).
    for key, value in zip(last_keys, last_values):
        if clean_text(key) in result[fisrt_key][clean_text(keys[0])]:
            if clean_text(value) == "N/A" or not clean_text(value):
                continue

        result[fisrt_key][clean_text(keys[0])][clean_text(key)] = clean_text(value)

    return result

def parse_Contact_Office_table(trs, fisrt_key):
    """Parse the 'Contact Office' table.

    Requires exactly three rows: header, labels, values.  The first
    label names a nested sub-section that collects the remaining
    label/value pairs.  Returns
    ``{fisrt_key: {first_label: {label: value}}}`` or ``{}`` when the
    row count differs from 3.
    """
    if len(trs) != 3:
        return {}
    keys = trs[1].xpath("./td/text()").extract()
    values = trs[2].xpath('./td/text()').extract()
    inner = {clean_text(k): clean_text(v) for k, v in zip(keys[1:], values)}
    return {fisrt_key: {clean_text(keys[0]): inner}}

#parse_Reference_Information_table

def parse_Reference_Information_table(trs, fisrt_key):
    """Parse the 'Pick-up Reference Information' table.

    Each row after the header holds alternating label/value cells
    (label, value, label, value, ...); a trailing unpaired cell is
    dropped.  Returns ``{fisrt_key: {label: value}}``, or ``{}`` unless
    the table has exactly six rows.
    """
    if len(trs) != 6:
        return {}
    section = {}
    for row in trs[1:]:
        cells = [cell.strip() for cell in row.xpath("./td/text()").extract()]
        # Pair even-indexed cells (labels) with odd-indexed cells (values).
        for label, value in zip(cells[::2], cells[1::2]):
            section[label] = value
    return {fisrt_key: section}

def paser_Container_Activity_Information_table(table, fisrt_key, sel, headers, cookies):
    """Parse the container list table and fetch each container's move history.

    The table's second row holds the column labels (trailing '.'
    stripped) and every following row is one container; the container
    number is embedded in a ``frmCntrMoveDetail('...')`` JS call in the
    first cell.  For each container a follow-up POST (using the hidden
    fields of the ``frmCntrMove`` form found in *sel*) retrieves the
    move-history page, which is parsed into a list of dicts stored under
    the container's ``'Container Moves'`` key.

    Parameters
    ----------
    table : Selector
        The container-information <table> node.
    fisrt_key : str
        Key the container list is stored under in the result.
    sel : Selector
        Selector over the whole page (needed for the hidden form fields).
    headers, cookies :
        Forwarded to ``post_http`` for the per-container detail requests.

    Returns
    -------
    dict
        ``{fisrt_key: [container_dict, ...]}`` or ``{}`` when the table
        has fewer than three rows.
    """
    result = {}
    trs = table.xpath('./tr')
    if len(trs) < 3:
        return result
    result[fisrt_key] = []

    # Column labels live in the second row; strip whitespace and a
    # trailing '.' so e.g. 'Container No.' becomes 'Container No'.
    key_tds = trs[1].xpath('./td').xpath("string(.)").extract()
    key_tds = [key.strip() for key in key_tds]
    key_tds = [key[:-1] if key.endswith(".") else key for key in key_tds]

    # Compile once instead of per row.
    cntr_no_pattern = re.compile(r"frmCntrMoveDetail\('(.*?)'\)", re.S | re.M)
    for tr_value in trs[2:]:
        td_value_nodes = tr_value.xpath("./td")
        # First cell: container number from the JS link; empty string
        # when the cell or the pattern is absent (best-effort, as before).
        first_cell_html = td_value_nodes[0].extract() if td_value_nodes else ""
        match = cntr_no_pattern.search(first_cell_html)
        td_values = [match.group(1) if match else ""]
        td_values += [td.xpath("string(.)").extract()[0] for td in td_value_nodes[1:]]
        td_values = [value.strip() for value in td_values]
        item_dict = {clean_text(key): clean_text(value)
                     for key, value in zip(key_tds, td_values)}
        result[fisrt_key].append(item_dict)

    # Hidden fields of the frmCntrMove form must be echoed back on the
    # detail POST together with the container number.
    post_data_dict = {}
    for node in sel.xpath(".//form[@name='frmCntrMove']/input[@type='hidden']"):
        names = node.xpath("./@name").extract()
        values = node.xpath("./@value").extract()
        if names and values:
            post_data_dict[names[0]] = values[0]
    post_url = 'https://ct.shipmentlink.com/servlet/TDB1_CargoTracking.do'

    # Bug fix: iterate result[fisrt_key], not the hard-coded
    # result['Containers'] — the latter broke any caller passing a
    # different fisrt_key.
    for container in result[fisrt_key]:
        contr_no = container['Container No']
        new_post_data = deepcopy(post_data_dict)
        new_post_data['cntr_no'] = contr_no
        html_text = post_http(post_url, new_post_data, cookies, headers)
        sub_sel = Selector(text=html_text)

        table_nodes = sub_sel.xpath('//table')
        assert len(table_nodes) == 2  # detail page layout sanity check
        table_trs = table_nodes[1].xpath('./tr')
        if len(table_trs) <= 2:
            # No move events for this container.
            continue

        # Row 0 is decoration, row 1 holds the column labels, every
        # later row is one move event.
        keys = [clean_text(key) for key in table_trs[1].xpath('./td/text()').extract()]
        container_move_info = []
        for value_node in table_trs[2:]:
            values = [clean_text(td.xpath('string(.)').extract()[0])
                      for td in value_node.xpath('./td')]
            assert len(values) == len(keys)
            container_move_info.append(dict(zip(keys, values)))
        container['Container Moves'] = container_move_info

    return result


def parse_Basic_Information_table(trs, first_key):
    """Parse the 'Basic Information' table.

    Every row after the header carries two label/value pairs laid out as
    four cells.  A '.' is stripped from labels and pairs with an empty
    label or value are skipped.  Returns ``{first_key: {label: value}}``
    or ``{}`` for tables shorter than three rows.  Raises AssertionError
    when a row does not have exactly four cells (as before).
    """
    if len(trs) < 3:
        return {}
    info = {}
    for row in trs[1:]:
        cells = [clean_text(cell.xpath('string(.)').extract()[0])
                 for cell in row.xpath('./td')]
        assert len(cells) == 4
        for label, value in ((cells[0], cells[1]), (cells[2], cells[3])):
            if label and value:
                info[label.replace(".", "")] = value
    return {first_key: info}

def parse_top_table(tr):
    """Parse the page's banner row (the one carrying the EGLV logo).

    The row holds five cells: the logo cell followed by two label/value
    pairs.  Labels get any '.' removed and pairs with an empty label or
    value are dropped.  Returns ``{label: value}``; raises
    AssertionError when the cell count differs from 5 (as before).
    """
    cells = [clean_text(td.xpath('string(.)').extract()[0])
             for td in tr.xpath('./td')]
    assert len(cells) == 5
    parsed = {}
    for label, value in ((cells[1], cells[2]), (cells[3], cells[4])):
        if label and value:
            parsed[label.replace(".", "")] = value
    return parsed


def parse_tables(sel, headers=None, cookies=None):
    """Dispatch every <table> in *sel* to its section-specific parser.

    The section is recognised from the HTML of the table's first row.
    *headers*/*cookies* are only used by the container table's parser,
    which posts follow-up requests for each container's move history.

    Parameters
    ----------
    sel : Selector
        Selector over the response HTML.
    headers, cookies : dict | None
        Session data forwarded to the container parser.

    Returns
    -------
    dict
        Merge of every recognised table's parsed output.
    """
    result = {}
    for table in sel.xpath("//table"):
        trs = table.xpath('./tr')
        if not trs:
            # Robustness fix: empty/decorative tables used to crash on
            # trs[0]; skip them instead.
            continue
        first_tr_html = trs[0].extract()
        # Keep the original match order: the container check must come
        # before the plain 'Release Status' substring test.
        if "Advance Filing Status" in first_tr_html:
            temp_result = parse_Advance_Filing_Status_table(trs, "Advance Filing Status")
        elif 'Transship Information' in first_tr_html:
            temp_result = paser_Container_Activity_Information_table(table, "Containers", sel, headers, cookies) if False else parse_Transship_Information_table(trs, "Transship Information")
        elif 'Container(s) information on B/L and Current Status' in first_tr_html:
            temp_result = paser_Container_Activity_Information_table(table, "Containers", sel, headers, cookies)
        elif 'Release Status' in first_tr_html:
            temp_result = parse_Release_Status_table(trs, "ReleaseStatus")
        elif 'Contact Office' in first_tr_html:
            temp_result = parse_Contact_Office_table(trs, "Contact Office")
        elif 'Basic Information' in first_tr_html:
            temp_result = parse_Basic_Information_table(trs, "Basic Information")
        elif 'EGLV_logo' in first_tr_html:
            temp_result = parse_top_table(trs[0])
        elif 'Pick-up Reference Information' in first_tr_html:
            temp_result = parse_Reference_Information_table(trs, "Pick-up Reference Information")
        else:
            temp_result = {}
        result.update(temp_result)
    return result

def _post_and_parse(url, data, headers, request_cookie, parse_headers=None, parse_cookies=None):
    """POST *data* to *url* and parse every table in the HTML response.

    *parse_headers*/*parse_cookies* are forwarded to ``parse_tables``
    for sections (the container table) that issue follow-up requests.
    Returns ``(parsed_dict, response_text)``.
    """
    resp = requests.post(url, headers=headers, data=data, cookies=request_cookie)
    sel = Selector(text=resp.text)
    parsed = parse_tables(sel, headers=parse_headers, cookies=parse_cookies)
    return parsed, resp.text


def get_shipment_info(timestamp, captcha_input, BL='146900296776', request_cookie=None, headers=shipment_headers):
    """Fetch the full tracking record for a bill of lading.

    Performs the captcha-protected main query (which lists the
    containers) plus four follow-up ``GetDispInfo`` queries — Advance
    Filing Status, Release Status, Contact Office and Pick-up Reference
    Information — and merges every parsed table into one dict.

    Parameters
    ----------
    timestamp :
        Value of the ``hd_captcha_seq`` hidden field that was served
        alongside the captcha image.
    captcha_input : str
        Text the user read from the captcha.
    BL :
        Bill-of-lading number to track (coerced to ``str``).
    request_cookie : dict | None
        Session cookies matching the captcha.
    headers : dict
        HTTP headers used for every request.

    Raises
    ------
    IndexError
        When the response carries no ``firstCtnNo`` marker (typically a
        wrong captcha or unknown BL) — unchanged from the original
        behaviour so existing callers can keep catching it.
    """
    BL = str(BL)
    url = 'https://ct.shipmentlink.com/servlet/TDB1_CargoTracking.do'

    # Step 1: the captcha-validated main query.  Note: parse_tables is
    # deliberately called without headers/cookies here, as before.
    main_data = [
        ('BL', BL),
        ('CNTR', ''),
        ('bkno', ''),
        ('TYPE', 'BL'),
        ('NO', ''),
        ('NO', ''),
        ('NO', ''),
        ('NO', ''),
        ('NO', ''),
        ('NO', BL),
        ('SEL', 's_bl'),
        ('captcha_input', captcha_input),
        ('hd_captcha_seq', timestamp),
    ]
    total_result, main_text = _post_and_parse(url, main_data, headers, request_cookie)

    # The page embeds the first container number in a JS link; [0]
    # raises IndexError when the marker is missing (see docstring).
    firstCtnNo = re.findall('firstCtnNo=(.*?)&pod', main_text, re.S | re.M)[0]

    # Steps 2-5: one GetDispInfo request per extra section.  The flag
    # marks the only request whose parse needs the session forwarded
    # (its container table posts again).
    followups = [
        # Advance Filing Status.
        # NOTE(review): pod is hard-coded to 'USLAX' — confirm for
        # shipments not discharged at Los Angeles.
        ([('TYPE', 'GetDispInfo'), ('Item', 'AMSACK'), ('BL', BL),
          ('firstCtnNo', firstCtnNo), ('pod', 'USLAX')], True),
        # Release Status.
        ([('TYPE', 'GetDispInfo'), ('Item', 'RlsStatus'), ('BL', BL)], False),
        # Contact Office.
        ([('TYPE', 'GetDispInfo'), ('Item', 'ContactOffice'), ('BL', BL)], False),
        # Pick-up Reference Information.
        ([('TYPE', 'GetDispInfo'), ('Item', 'PickupRef'), ('BL', BL),
          ('firstCtnNo', firstCtnNo)], False),
    ]
    for data, forward_session in followups:
        if forward_session:
            parsed, _ = _post_and_parse(url, data, headers, request_cookie,
                                        parse_headers=headers,
                                        parse_cookies=request_cookie)
        else:
            parsed, _ = _post_and_parse(url, data, headers, request_cookie)
        if parsed:
            total_result.update(parsed)

    return total_result


