from bs4 import BeautifulSoup
import pickle
import urllib.parse
import urllib.request,urllib.error
import urllib3
import requests
urllib3.disable_warnings()
from lxml import etree

# Countries whose listings still include vessel photos past page 200.
# "US","GB","ES","PA","NL","LR","DE","FR",
countrys = ["US","GB","ES","PA","NL","LR","DE","FR"]  # ISO codes, used as the &flag= query value
types = [901]  # vessel type codes, used as the &type= query value

# NOTE(review): original comment says "at 29 it's the next page" — presumably
# ~29 listings per results page; confirm against the site.

# Countries with no photos after page 200
# countrys = ["VN","ZZ","TR","TW","SE","SG","NO",
#             "MH","MT","MY","KR","JP","IT","IR","ID","IN","HK"]
# pages = [200,200,200,200,200,200,200,
#          200,200,200,200,200,200,200,200,200,200]


# Test configuration
# countrys = ["US"]
# types = [4,402,401,
#          6,601,602,603,
#          3,2,8,5,901,7,0,1]
# pages = [2,1]

def main():
    """Entry point: scrape vessel records and append them to the pickle file."""
    listing_url = 'https://www.vesselfinder.com/vessels?page='
    output_path = r'D:\Python works\Some scripts\Ship_info\ship\ship300_11.pkl'
    getData(listing_url, output_path)

def getData(baseurl, savepath):
    """Scrape vessel detail pages and append one pickled dict per ship to *savepath*.

    For every (country, ship type) pair, listing pages 1..200 are fetched from
    *baseurl*; each listed vessel's detail page is parsed for flag, IMO/MMSI,
    ship type, IMO number, name and photo bytes.  As soon as a vessel without
    a photo is found, the remaining items, pages AND types for that country
    are skipped (the site lists photographed vessels first).

    Args:
        baseurl: listing URL prefix ending in 'page=' (page number is appended).
        savepath: pickle file path; records are appended in 'ab' mode, so the
            file can be read back with repeated pickle.load() calls.
    """
    # maps the site's table labels to the keys stored in each ship record
    field_map = {
        'Flag': 'shipflag',
        'IMO / MMSI': 'IMO/MMSI',
        'Ship type': 'shiptype',
        'IMO number': 'IMO',
    }

    for country in countrys:
        no_photo = False  # set once a ship without a photo shows up
        print(f'Start storage country {country}')

        for ship_type in types:
            for page in range(1, 201):
                print('start parse page' + str(page)
                      + ' &type={}'.format(ship_type)
                      + ' &flag={}'.format(country))
                # &flag= restricts the listing to one country's fleet
                url = baseurl + str(page) + '&type={}'.format(ship_type) + '&flag={}'.format(country)
                soup = BeautifulSoup(askURL(url), 'html.parser')

                with open(savepath, 'ab') as f:
                    for item in soup.find_all('a', class_='arc'):
                        detail_url = 'https://www.vesselfinder.com' + item.attrs['href']
                        soup2 = BeautifulSoup(askURL(detail_url), 'html.parser')

                        # ships without a photo mark the end of useful data
                        if soup2.find('img', alt='No photo') is not None:
                            no_photo = True
                            break

                        keys = [td.text for td in soup2.find_all('td', class_='n3')]
                        values = [td.text for td in soup2.find_all('td', class_='v3')]
                        # setdefault keeps the FIRST occurrence of a duplicated label,
                        # matching the original first-match scan
                        table = {}
                        for k, v in zip(keys, values):
                            table.setdefault(k, v)

                        ship = {out: table[src] for src, out in field_map.items() if src in table}

                        for h1 in soup2.find_all('h1', class_='title is-uppercase st'):
                            ship['shipname'] = h1.get_text()
                        for img in soup2.find_all('img', class_='main-photo'):
                            # raw image bytes; SSL verification intentionally off
                            ship['shippic'] = requests.get(img.attrs['src'], verify=False).content

                        pickle.dump(ship, f)

                # no photo found: skip the remaining pages for this type
                if no_photo:
                    break
            # ...and the remaining types for this country
            if no_photo:
                break


def askURL(url):
    """Fetch *url* and return the page body as UTF-8 encoded bytes.

    SSL certificate verification is disabled (urllib3 warnings are silenced
    at module import).  On any request failure the error is printed and an
    empty string is returned, so callers can still feed the result to
    BeautifulSoup without crashing.

    Args:
        url: absolute URL to fetch.

    Returns:
        UTF-8 bytes of the response body, or '' on failure.
    """
    head = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3868.400 QQBrowser/10.8.4394.400','Connection':'close'
    }
    html = ''
    try:
        # BUG FIX: the original caught urllib.error.URLError, which requests
        # never raises — network errors crashed the scraper.  Catch the
        # requests exception hierarchy instead.
        response = requests.get(url, headers=head, verify=False)
        html = response.text.encode('utf-8')
    except requests.exceptions.RequestException as e:
        print(e)
    return html

def saveData(datalist, savepath):
    """Pickle every record of *datalist* sequentially into *savepath*.

    The file is opened in 'wb' mode, so any existing content is overwritten.
    The records can be read back with repeated pickle.load() calls.

    Args:
        datalist: iterable of picklable records (ship dicts).
        savepath: destination file path.
    """
    # context manager guarantees the handle is closed even if a dump fails
    with open(savepath, 'wb') as f:
        for record in datalist:
            pickle.dump(record, f)

if __name__ == '__main__':
    # Run the scraper only when executed as a script, not on import.
    main()
    print('done!')