# coding=utf-8
import os,time
import requests
from bs4 import BeautifulSoup
import json,re
# import io  # for Python 2.7 (io.open provides the encoding= parameter)

# Browser-like request headers. Host/Referer mimic a visit coming from the
# journal's upcoming-issue page so the server treats requests as a normal
# browser session rather than a bot.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
            'accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'Host': 'www.osapublishing.org',
            'Referer': 'https://www.osapublishing.org/ol/upcomingissue.cfm',
}

# Crawl every volume of Optics Letters: for each volume iterate its issues,
# scrape each article's metadata from its abstract page, and rewrite
# OL_VOL{vol}.json after every article so a crash loses at most one record.
for vol_num in range(1, 45):  # vol 45 is the 2020 volume
    OL_List = []
    # Older volumes published fewer issues per year.
    if vol_num < 4:
        issue_max = 6
    elif vol_num < 13:
        issue_max = 12
    else:
        issue_max = 24
    for issue_num in range(1, issue_max + 1):
        # Issue table-of-contents URL.
        root_url = 'https://www.osapublishing.org/ol/issue.cfm?volume={}&issue={}'.format(vol_num, issue_num)

        # timeout prevents the crawl from hanging forever on a dead connection.
        html1 = requests.get(root_url, headers=headers, timeout=30).content.decode('utf-8')
        root_soup = BeautifulSoup(html1, 'html.parser')
        # Each article title paragraph links to the article's abstract page.
        article_tags = root_soup.find_all('p', class_="article-title")
        article_num = len(article_tags)
        print('开始采集第{}卷第{}期，共{}篇...'.format(vol_num, issue_num, article_num))
        counter = 0
        # NOTE: renamed loop variable from `list` (shadowed the builtin).
        for article_tag in article_tags:
            # Build the absolute article URL from the relative href.
            url = 'https://www.osapublishing.org' + article_tag.find('a').get('href')
            counter += 1
            print('开始采集{}/{}篇...'.format(counter, article_num))
            start = time.time()
            cache = requests.get(url, headers=headers, timeout=30).content.decode('utf-8')
            soup = BeautifulSoup(cache, 'html.parser')
            # Reset every field first: otherwise an AttributeError below would
            # leave values from the PREVIOUS article (or raise NameError on the
            # very first one) and a wrong record would be written silently.
            title = author = vol = issue = page = date = doi = abstract = None
            try:
                title = soup.find('h1', class_="article-title").get_text().strip()
                author = soup.find('p', class_="article-authors").get_text().strip()
                detail = soup.find('ul', class_="small list-inline col-md-12 article-journal-name").find_all('li')
                vol = detail[1].get_text().strip().replace(',', '')
                issue = detail[2].get_text().strip()
                page = detail[3].get_text().strip()
                date = detail[4].get_text().strip().replace('(', '').replace(')', '')
                # Strip the field labels, e.g. "Vol. 45" -> "45".
                vol = re.sub('Vol. ', '', vol)
                issue = re.sub('Issue ', '', issue)
                page = re.sub('pp. ', '', page)
                doi = soup.find('li', class_="article-doi").find('a').get('href')
                abstract = soup.find('div', class_="article-section page-section active").find('p').find('p').get_text().strip()
            except (AttributeError, IndexError):
                # Some pages are missing elements; record what we got (the
                # rest stays None) instead of aborting the whole crawl.
                print("页面缺少一些属性！不过不用担心！")

            dic = {
                    'title': title,
                    'author': author,
                    'vol': vol,
                    'issue': issue,
                    'page/number': page,
                    'date': date,
                    'doi': doi,
                    'abstract': abstract
                   }

            OL_List.append(dic)

            # Rewrite the whole volume file after each article (incremental
            # checkpointing; cheap relative to the network round-trips).
            with open('OL_VOL{}.json'.format(vol_num), 'w', encoding="utf-8") as file:
                file.write(json.dumps(OL_List, indent=2, ensure_ascii=False))
            end = time.time()
            print('{}/{}篇采集完成，耗时{}s。'.format(counter, article_num, (end - start)))
            time.sleep(1)  # be polite to the server between requests
            
