import requests
import bs4

def get_article_href(url):
    """Return absolute URLs of all presentation pages linked from a USENIX
    technical-sessions page.

    Args:
        url: URL of a conference technical-sessions index page.

    Returns:
        List of absolute presentation-page URLs (may be empty).
    """
    # timeout= keeps a dead server from hanging the script forever;
    # an explicit parser silences bs4's GuessedAtParserWarning and makes
    # parsing consistent across environments.
    ori_content = requests.get(url, timeout=30).text
    soup = bs4.BeautifulSoup(ori_content, 'html.parser')

    paper_url_list = []
    for h2_tag in soup.find_all('h2', attrs={'class': 'node-title'}):
        label_a = h2_tag.find('a')
        if label_a is None:
            continue
        # Fetch href once: an <a> tag without an href attribute returns None,
        # and `'/presentation/' in None` would raise TypeError.
        href = label_a.get('href')
        if href and '/presentation/' in href:
            paper_url_list.append('https://www.usenix.org' + href)

    return paper_url_list


def get_abstract_from_article_href(url):
    """Fetch one presentation page and return its title and abstract.

    Args:
        url: URL of a single USENIX presentation page.

    Returns:
        A string of the form "<title>\n<abstract>\n\n".

    Raises:
        AttributeError, IndexError: if the page does not match the expected
        layout (no <h1>, or fewer than two 'field-item odd' divs) — the
        caller catches these and skips the page.
    """
    # Same hardening as get_article_href: bounded request, explicit parser.
    ori_content = requests.get(url, timeout=30).text
    soup = bs4.BeautifulSoup(ori_content, 'html.parser')
    title = soup.find('h1').get_text()
    # NOTE(review): index [1] assumes the abstract is the second
    # 'field-item odd' div on NSDI presentation pages — layout-dependent,
    # confirm if scraping other conferences/years.
    abstract = soup.find_all('div', attrs={'class': 'field-item odd'})[1].get_text()
    return title + '\n' + abstract + '\n\n'

if __name__ == "__main__":
    # Scrape every listed NSDI year and dump "title\nabstract" entries
    # into one text file per year.
    for year in ['19']:
        paper_url_list = get_article_href(
            f'https://www.usenix.org/conference/nsdi{year}/technical-sessions')
        print(paper_url_list)

        # Collect fragments and join once at the end instead of quadratic
        # `total_paper +=` string concatenation.
        pieces = []
        for url in paper_url_list:
            try:
                pieces.append(get_abstract_from_article_href(url))
            except Exception as e:
                # Best-effort scrape: report and skip pages whose layout
                # doesn't match the expected template.
                print(e)
                print('!!!Exception:   ' + url)

        # Abstracts contain non-ASCII characters; an explicit encoding
        # avoids UnicodeEncodeError on platforms defaulting to cp1252.
        with open(f'nsdi{year}.txt', 'w', encoding='utf-8') as f:
            f.write(''.join(pieces))