
from bs4 import BeautifulSoup
import pickle
import urllib.parse

import urllib.request,urllib.error

import urllib3
import requests
urllib3.disable_warnings()

# Batch 1 of country flag codes (ISO 3166-1 alpha-2 style), started 17:35
countrys = ["ZW","ZM","YE","WF","VA","VU","UZ","UY","AE","UA","UG","VI","TV","TC","TM","TN","TT","TO","TG","TH","TZ","TJ","SY",
            "CH","SZ","SR","SD","VC","PM","XI","LC","KN","LK","SS","ZA","SO","SB","SI","SK","SL"]

# pages[j] is the number of listing pages to crawl for countrys[j];
# the two lists must stay index-aligned (same length, same order).
pages = [2,6,10,7,34,51,1,21,73,63,3,2,31,3,13,17,20,7,43,119,41,2,5,
         63,3,5,11,82,6,2,4,61,71,4,79,35,9,11,12,56]

# Test data (small sample for a quick dry run)
# countrys = ["ZW","JE"]
# pages = [2,1]

def main():
    """Entry point: crawl every configured country and pickle the ships.

    The output path is hard-coded; getData appends one pickle record
    per ship to it.
    """
    base_url = 'https://www.vesselfinder.com/vessels?page='
    save_path = r'D:\Python works\Some scripts\Ship_info\ship\shipZW.pkl'
    getData(base_url, save_path)

def getData(baseurl, savepath):
    """Scrape every listing page of every country in `countrys` and append
    one pickle record per ship to `savepath`.

    baseurl  -- listing URL prefix; the page number is appended to it
    savepath -- pickle file path, opened in append-binary mode per page

    Records can later be read back with repeated pickle.load() calls.
    """
    for j, country in enumerate(countrys):
        print(f'Start storage country {country}')
        # Walk every listing page of this country (pages[j] is its page count).
        for page in range(1, pages[j] + 1):
            print('start parse page' + str(page) + ' &flag={}'.format(country))
            url = baseurl + str(page) + '&flag={}'.format(country)  # filter listing by country flag
            soup = BeautifulSoup(askURL(url), 'html.parser')

            # `with` guarantees the file is closed even if a request fails mid-page.
            with open(savepath, 'ab') as f:
                # Each <a class="arc"> on a listing page links to one ship's detail page.
                for item in soup.find_all('a', class_='arc'):
                    detail_url = 'https://www.vesselfinder.com' + item.attrs['href']
                    detail_soup = BeautifulSoup(askURL(detail_url), 'html.parser')

                    # NOTE(review): `break` stops at the first ship on the page
                    # without a photo and skips the remaining listings — kept
                    # from the original; confirm `continue` wasn't intended.
                    if detail_soup.find('img', alt='No photo') is not None:
                        break

                    pickle.dump(_parse_ship(detail_soup), f)


def _parse_ship(detail_soup):
    """Extract one ship's attributes from its detail-page soup into a dict."""
    # The spec table has labels in td.n3 and values in td.v3; pair them up.
    # setdefault keeps the FIRST value for a repeated label, matching the
    # original code's break-on-first-match scans.
    details = {}
    for key_cell, value_cell in zip(detail_soup.find_all('td', class_='n3'),
                                    detail_soup.find_all('td', class_='v3')):
        details.setdefault(key_cell.text, value_cell.text)

    ship = {}
    # Map the table labels we care about onto the record's field names.
    for label, field in (('Flag', 'shipflag'),
                         ('IMO / MMSI', 'IMO/MMSI'),
                         ('Ship type', 'shiptype'),
                         ('IMO number', 'IMO')):
        if label in details:
            ship[field] = details[label]

    # Ship name from the page title (last match wins, as in the original).
    for title in detail_soup.find_all('h1', class_='title is-uppercase st'):
        ship['shipname'] = title.get_text()

    # Download the main photo bytes; verification is disabled site-wide
    # (urllib3.disable_warnings at import time), hence verify=False.
    for photo in detail_soup.find_all('img', class_='main-photo'):
        ship['shippic'] = requests.get(photo.attrs['src'], verify=False).content

    return ship



def askURL(url):
    """Fetch `url` and return the page body as UTF-8 encoded bytes.

    Returns an empty string on failure so callers can still feed the
    result to BeautifulSoup without crashing.
    """
    head = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3868.400 QQBrowser/10.8.4394.400','Connection':'close'
    }
    html = ''
    try:
        # Certificate verification is disabled on purpose for this site
        # (warnings are silenced at import time via urllib3.disable_warnings).
        # A timeout prevents the crawler from hanging forever on a dead host.
        response = requests.get(url, headers=head, verify=False, timeout=30)
        html = response.text.encode('utf-8')
    except requests.exceptions.RequestException as e:
        # Bug fix: requests raises RequestException subclasses, not
        # urllib.error.URLError — the original handler never fired, so any
        # network failure crashed the whole crawl.
        print(e)
    return html

def saveData(datalist, savepath):
    """Pickle each element of `datalist` into `savepath`, overwriting it.

    Records are written back-to-back, matching the per-ship dumps in
    getData, so they can be read with repeated pickle.load() calls.
    """
    # `with` guarantees the file is closed even if pickling raises.
    with open(savepath, 'wb') as f:
        for record in datalist:
            pickle.dump(record, f)

# Run the full crawl only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
    print('done!')