# coding=utf-8
import io    # py2.7: io.open accepts the encoding kwarg (py3: builtin open)
import json
import os
import re
import time
from threading import Thread
# from queue import Queue         # py3 equivalent
from Queue import Queue, Empty    # py2.7

import requests
from bs4 import BeautifulSoup

class OL_Spider():
    """Multithreaded scraper for one volume of Optics Letters (OL).

    Walks every issue's table of contents on osapublishing.org, scrapes
    each article's metadata (title, authors, volume, issue, pages, date,
    DOI, abstract) with a pool of worker threads, and writes the result
    to ``OL_VOL<vol_num>.json``.
    """

    def __init__(self, vol_num):
        """Prepare the scraper for one volume.

        :param vol_num: 1-based volume number to scrape.
        """
        self.OL_List = []  # article dicts, appended by worker threads
        self.vol_num = vol_num
        # Issues per volume grew over the journal's history:
        # vols 1-3 -> 6 issues, vols 4-12 -> 12 issues, vols 13+ -> 24.
        if self.vol_num < 4:
            self.issue_max = 6
        elif self.vol_num < 13:
            self.issue_max = 12
        else:
            self.issue_max = 24

        self.qurl = Queue()  # article-page URLs awaiting the workers
        self.thread_num = 20  # size of the worker-thread pool
        self.headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
                'accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
                'accept-encoding': 'gzip, deflate, br',
                'accept-language': 'zh-CN,zh;q=0.9',
                'Host': 'www.osapublishing.org',
                'Referer': 'https://www.osapublishing.org/ol/upcomingissue.cfm',
        }

    def produce_url(self):
        """Fill ``self.qurl`` with the URL of every article in the volume."""
        baseurl = 'https://www.osapublishing.org/ol/issue.cfm?volume={}&issue={}'
        for issue_num in range(1, self.issue_max + 1):
            issue_url = baseurl.format(self.vol_num, issue_num)
            html = requests.get(issue_url, headers=self.headers).content.decode('utf-8')
            root_soup = BeautifulSoup(html, 'html.parser')  # 'lxml' would be faster
            # NOTE: renamed loop var -- the original shadowed the builtin `list`.
            for entry in root_soup.find_all('p', class_="article-title"):
                url = 'https://www.osapublishing.org' + entry.find('a').get('href')
                self.qurl.put(url)  # queued for the worker threads

    def get_info(self):
        """Worker: pop article URLs off the queue and scrape their metadata.

        Runs until the queue is drained.  Pages missing an expected element
        are reported and skipped (bug fix: the original still built the
        result dict after an AttributeError, using unbound or stale locals).
        """
        while True:
            # get_nowait + Empty closes the race between empty() and get()
            # when 20 workers drain the queue concurrently.
            try:
                root_url = self.qurl.get_nowait()
            except Empty:
                break
            cache = requests.get(root_url, headers=self.headers).content.decode('utf-8')
            soup = BeautifulSoup(cache, 'html.parser')
            try:
                title = soup.find('h1', class_="article-title").get_text().strip()
                author = soup.find('p', class_="article-authors").get_text().strip()
                detail = soup.find('ul', class_="small list-inline col-md-12 article-journal-name").find_all('li')
                vol = detail[1].get_text().strip().replace(',', '')
                issue = detail[2].get_text().strip()
                page = detail[3].get_text().strip()
                date = detail[4].get_text().strip().replace('(', '').replace(')', '')
                vol = re.sub('Vol. ', '', vol)
                issue = re.sub('Issue ', '', issue)
                page = re.sub('pp. ', '', page)
                doi = soup.find('li', class_="article-doi").find('a').get('href')
                abstract = soup.find('div', class_="article-section page-section active").find('p').find('p').get_text().strip()
            # IndexError added: detail[] may be shorter than expected and
            # would otherwise kill this worker thread.
            except (AttributeError, IndexError):
                print("页面缺少一些属性！不过不用担心！")
                continue  # skip the malformed page instead of recording junk

            # list.append is atomic in CPython, so concurrent workers are safe.
            self.OL_List.append({
                    'title': title,
                    'author': author,
                    'vol': vol,
                    'issue': issue,
                    'page/number': page,
                    'date': date,
                    'doi': doi,
                    'abstract': abstract,
                   })

    def run(self):
        """Produce all URLs, scrape them with the thread pool, dump JSON."""
        self.produce_url()
        ths = []
        for _ in range(self.thread_num):
            th = Thread(target=self.get_info)
            th.start()
            ths.append(th)
        for th in ths:
            th.join()

        # io.open provides the encoding kwarg on py2.7 (py3: builtin open).
        with io.open('OL_VOL{}.json'.format(self.vol_num), 'w', encoding="utf-8") as file:
            file.write(json.dumps(self.OL_List, indent=2, ensure_ascii=False))

if __name__ == '__main__':
    # Scrape volumes 1 through 44 sequentially, reporting wall time per volume.
    for vol in range(1, 45):
        print('开始提取第{}卷......'.format(vol))
        started = time.time()
        OL_Spider(vol).run()
        finished = time.time()
        print('第{}卷提取完毕，用时{}秒。'.format(vol, (finished - started)))