from bs4 import BeautifulSoup
import requests,re

# Browser-like request headers for the scraper.
# NOTE: HTTP header names use plain hyphens — the original keys had spaces
# around the hyphens ("Accept - Encoding"), which no server recognizes.
# 'Host' is intentionally omitted: requests derives it from the target URL,
# and the original hard-coded value ("httpbin.org") did not match the site
# actually being scraped (infoq.com).
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400",
}

# Landing page of the InfoQ news listing; the loop at the bottom of the
# script builds the paginated variants from this base.
# (Removed a commented-out verbatim duplicate of craw()'s body that lived
# here — dead code that only invited divergence from the real function.)
url = "https://www.infoq.com/news/"

def craw(url):
    """Fetch an InfoQ news listing page and print the article titles on it.

    Parameters:
        url: Absolute URL of a news listing page.

    Returns:
        list[list[str]]: One list of title strings per 'items__content'
        section found on the page (also printed, preserving the original
        behavior). Empty list if the page has no such sections.

    Raises:
        requests.RequestException: on connection failure or timeout.
    """
    # requests has NO default timeout — without one a stalled server hangs
    # the whole script forever.
    response = requests.get(url, timeout=10)
    soup = BeautifulSoup(response.text, 'lxml')

    all_titles = []
    for section in soup.find_all('div', class_='items__content'):
        # <a> tags without a 'title' attribute are non-article links; skip them.
        titles = [a.get('title') for a in section.find_all('a') if a.get('title')]
        print(titles)
        all_titles.append(titles)
    return all_titles

# Walk the paginated news listing: offsets 15, 30 and 45.
for offset in range(15, 46, 15):
    url = f"https://www.infoq.com/news/{offset}"
    craw(url)