
from bs4 import BeautifulSoup
import pickle
import urllib.parse

import urllib.request,urllib.error

import urllib3
import requests
urllib3.disable_warnings()

def main():
    """Entry point: crawl ZW-flagged vessels and pickle their records to disk."""
    listing_url = 'https://www.vesselfinder.com/vessels?page='
    output_path = r'D:\Python works\Some scripts\Ship_info\ship\shipZW.pkl'
    getData(listing_url, output_path)

def getData(baseurl, savepath):
    """Scrape listing pages 1-4 of *baseurl* (filtered to flag ZW) and append
    one pickled dict per ship to the binary file at *savepath*.

    Each ship dict may contain: 'shipflag', 'IMO/MMSI', 'shiptype', 'IMO',
    'shipname', and 'shippic' (raw photo bytes). Ships whose detail page shows
    a "No photo" placeholder are skipped.
    """
    # Maps the label text in the detail table (td.n3) to our output keys.
    FIELD_MAP = {
        'Flag': 'shipflag',
        'IMO / MMSI': 'IMO/MMSI',
        'Ship type': 'shiptype',
        'IMO number': 'IMO',
    }
    for page in range(1, 5):  # each listing page
        print('start parse page' + str(page) + '&flag=ZW')
        url = baseurl + str(page) + '&flag=ZW'
        soup = BeautifulSoup(askURL(url), 'html.parser')
        # 'ab' so successive pages (and reruns) append rather than overwrite;
        # `with` guarantees the file is closed even if a fetch raises.
        with open(savepath, 'ab') as f:
            for anchor in soup.find_all('a', class_='arc'):
                detail_url = 'https://www.vesselfinder.com' + anchor.attrs['href']
                soup2 = BeautifulSoup(askURL(detail_url), 'html.parser')
                if soup2.find('img', alt='No photo') is not None:
                    # Skip only this ship. (The original used `break`, which
                    # silently abandoned every remaining ship on the page.)
                    continue
                # Build a label -> value table from the two parallel td columns.
                # setdefault keeps the FIRST occurrence of a duplicate label,
                # matching the original first-match scans.
                labels = [td.text for td in soup2.find_all('td', class_='n3')]
                values = [td.text for td in soup2.find_all('td', class_='v3')]
                table = {}
                for label, value in zip(labels, values):
                    table.setdefault(label, value)

                ship = {}
                for label, key in FIELD_MAP.items():
                    if label in table:
                        ship[key] = table[label]

                title = soup2.find('h1', class_='title is-uppercase st')
                if title is not None:
                    ship['shipname'] = title.get_text()

                for photo in soup2.find_all('img', class_='main-photo'):
                    # verify=False mirrors askURL; site certs are not validated.
                    ship['shippic'] = requests.get(photo.attrs['src'],
                                                   verify=False).content

                pickle.dump(ship, f)



def askURL(url):
    """GET *url* and return the body as UTF-8 encoded bytes.

    On any network/HTTP failure the error is printed and the empty string ''
    is returned (BeautifulSoup accepts either bytes or str, so callers work
    in both cases). TLS certificate verification is disabled (verify=False);
    the matching urllib3 warning is suppressed at module import.
    """
    head = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3868.400 QQBrowser/10.8.4394.400','Connection':'close'
    }
    html = ''
    try:
        response = requests.get(url, headers=head, verify=False)
        html = response.text.encode('utf-8')
    # BUG FIX: the original caught urllib.error.URLError, which requests never
    # raises, so every network failure crashed the crawl. requests signals all
    # transport/HTTP errors via RequestException subclasses.
    except requests.exceptions.RequestException as e:
        print(e)
    return html

def saveData(datalist, savepath):
    """Pickle each item of *datalist*, in order, to the file at *savepath*.

    Opens the file in 'wb' mode, so any existing content is overwritten.
    Using `with` ensures the handle is closed even if pickling raises.
    """
    with open(savepath, 'wb') as f:
        for item in datalist:
            pickle.dump(item, f)

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
    print('done!')