import requests
from bs4 import BeautifulSoup

import pickle
import urllib.parse
import urllib.request,urllib.error

# Already scraped: "VN","ZZ","US","GB","TR","TW","SE","ES","SG","SA","RU","" -- total exceeded 200 pages
# countrys = ["ZW","ZM","YE","WF","VA","VU","UZ","UY","AE","UA","UG","VI","TV","TC","TM","TN","TT","TO","TG","TH","TZ","TJ","SY",
#             "CH","SZ","SR","SD","VC","PM","XI","LC","KN","LK","SS","ZA","SO","SB","SI","SK","SL"]
# pages = [2,6,10,7,34,51,1,21,73,63,3,2,31,3,13,17,20,7,43,119,41,2,5,
#          63,3,5,11,82,6,2,4,61,71,4,79,35,9,11,12,56]

# Remaining country flag codes (ISO 3166-1 alpha-2 style) to scrape,
# paired element-wise with `pages`: pages[i] is the listing-page count
# for countrys[i] on vesselfinder.com.
countrys = ["TH","TZ","TJ","SY","CH","SZ","SR","SD","VC","PM","XI","LC","KN","LK","SS","ZA","SO","SB","SI","SK",
            "SL","SC","RS","SN","ST","SM","WS","SH","RW","RO","RE","QA","PR","PT"]
# Number of result pages per country; must stay the same length and order as `countrys`.
pages = [119,41,2,5,63,3,5,11,82,6,2,4,61,71,4,79,35,9,11,12,
        56,17,11,22,13,7,7,11,5,40,11,82,11,78]

# For testing only
# countrys = ["ZW",
#             "SZ"]
# pages = [2,
#          3]

# Entries without a photo should be detected and skipped (break) right away.
# src="https://static.vesselfinder.net/images/cool-ship2@2.png" -- placeholder shown when a ship has no photo
# src="https://static.vesselfinder.net/images/cool-ship2@2.png" -- same page
# src="https://static.vesselfinder.net/images/cool-ship2@2.png" -- different page


def main():
    """Entry point: scrape ship records for every configured country."""
    listing_prefix = 'https://www.vesselfinder.com/vessels?page='
    getData(listing_prefix)

def getData(baseurl):
    """Scrape ship records for every country in `countrys` and pickle them.

    For each country, every listing page is fetched; each ship's detail page
    is parsed and a dict with (up to) 'shipname', 'shippic' (raw image
    bytes), 'info' and 'IMO/MMSI' is appended to a per-country pickle file.

    Args:
        baseurl: listing URL prefix ending in 'page=' (the page number and
            '&flag=XX' country filter are appended).

    Returns:
        list: always empty; kept for backward compatibility with callers
        that expect a list result.
    """
    datalist1 = []
    for j, country in enumerate(countrys):
        # One pickle file per country, opened in append mode so reruns resume.
        savepath = r'D:\Python works\Some scripts\Ship_info\ship\ship{}.pkl'.format(country)
        print(f'Start storage country {country}')
        for i in range(1, pages[j] + 1):  # each listing page for this country
            print('start parse page' + str(i) + '&flag={}'.format(country))
            url = baseurl + str(i) + '&flag={}'.format(country)  # filter by flag
            html = askURL(url)
            soup = BeautifulSoup(html, 'html.parser')
            # Context manager guarantees the file is closed even if a request
            # or parse raises (the original leaked the handle on exceptions).
            with open(savepath, 'ab') as f:
                for item in soup.find_all('a', class_='arc'):
                    url2 = 'https://www.vesselfinder.com' + item.attrs['href']
                    soup2 = BeautifulSoup(askURL(url2), 'html.parser')
                    ship = {}
                    # find_all() never yields None, so the original
                    # `if item2 == None: break` guards were dead code;
                    # just record each matching element's content.
                    for item2 in soup2.find_all('h1', class_='title is-uppercase st'):
                        ship['shipname'] = item2.get_text()
                    for item2 in soup2.find_all('img', class_='main-photo'):
                        # Download the photo bytes themselves, not just the URL.
                        ship['shippic'] = requests.get(item2.attrs['src']).content
                    for item2 in soup2.find_all('p', class_='text2'):
                        ship['info'] = item2.get_text()
                    for item2 in soup2.find_all('td', class_='v3 v3np'):
                        ship['IMO/MMSI'] = item2.get_text()
                    # One pickle frame per ship; load with repeated pickle.load().
                    pickle.dump(ship, f)
    return datalist1



def askURL(url):
    """Fetch `url` and return the response body decoded as UTF-8.

    Network/HTTP errors are printed and swallowed; an empty string is
    returned in that case so callers can still feed the result to
    BeautifulSoup without extra checks.

    Args:
        url: fully-qualified URL to fetch.

    Returns:
        str: the decoded page body, or '' on failure.
    """
    head = {
        # Desktop Chrome UA so the site serves the normal HTML pages.
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE',
    }
    request = urllib.request.Request(url, headers=head)
    html = ''
    try:
        # `with` releases the underlying socket promptly; the original
        # never closed the response object.
        with urllib.request.urlopen(request) as response:
            html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        # HTTPError is a URLError subclass, so this covers both; best-effort
        # diagnostics only, callers get '' back.
        if hasattr(e, 'reason'):
            print(e.reason)
    return html

def saveData(datalist,savepath):
    f = open(savepath,'wb')
    for i in range(len(datalist)):
        pickle.dump(datalist[i],f)
    f.close()

# Run the full scrape only when executed as a script, not on import.
if __name__ == '__main__':
    main()
    print('done!')