# This is the crawler for OSChina website

import re
import ssl
import urllib.error
import urllib.request

import requests
from requests import RequestException

# Globally disable SSL certificate verification so HTTPS requests succeed
# even without a usable local CA bundle.
# NOTE(review): this weakens security for EVERY HTTPS connection made by
# this process — prefer installing proper certificates instead.
ssl._create_default_https_context = ssl._create_unverified_context
# Pattern extracting (title, span-text) pairs from "project-item" divs.
# The class [.|\s|\S] matches literal '.', '|', whitespace and non-whitespace,
# i.e. effectively any character, matched lazily.
regex_str = r'<div.*?project-item[.|\s|\S]*?title="(.*?)"[.|\s|\S]*?<span.*?>(.*?)</span>'


# start of functions


def get_page(url):
    """Fetch *url* and return its body decoded as UTF-8 text.

    Returns None when the request fails or the server answers with a
    non-200 status code.
    """
    try:
        # Browser-like User-Agent so the site does not reject the bot.
        headers = {'User-Agent': 'Mozilla/5.0'}
        req = urllib.request.Request(url, headers=headers)
        # Context manager closes the connection even on early return
        # (the original leaked the response object).
        with urllib.request.urlopen(req) as response1:
            # Status code 200 means success.
            if response1.getcode() == 200:
                # Report the length of the page content.
                html = response1.read()
                print('html length: ', len(html))
                return decode(html)
        return None
    except (urllib.error.URLError, RequestException):
        # BUG FIX: urlopen raises urllib.error.URLError (and its subclass
        # HTTPError), NOT requests.RequestException, so network errors
        # previously escaped this handler entirely.  RequestException is
        # kept so callers relying on the old contract still get None.
        return None


def decode(html):
    """Turn a UTF-8 encoded byte string into text.

    Raises UnicodeDecodeError if *html* is not valid UTF-8.
    """
    text = str(html, encoding='utf-8')
    return text


def parser(html):
    """Scan *html* and yield one dict per matched project item.

    Each dict carries two keys: 'title' (the title="..." attribute value)
    and 'price' (the text of the following <span>).
    """
    item_re = re.compile(regex_str, re.S)
    # regex_str has exactly two capture groups, so each match unpacks
    # into a (title, price) pair.
    for title, price in item_re.findall(html):
        yield {
            'title': title,
            'price': price,
        }


def main():
    """Crawl the OSChina front page and collect the parsed items.

    Returns the list of {'title': ..., 'price': ...} dicts; empty when
    the page could not be fetched.
    """
    url = 'https://www.oschina.net'
    html = get_page(url)
    # BUG FIX: get_page returns None on failure, and parser(None) would
    # then crash inside re.findall — bail out early instead.
    if html is None:
        return []
    master_data = list(parser(html))
    # Original built master_data and discarded it; returning it makes the
    # result usable while staying compatible with callers ignoring it.
    return master_data

# end of functions
# Guard the entry point so importing this module does not trigger a
# network crawl as a side effect.
if __name__ == '__main__':
    main()

