'''
OSChinaCrawler V1.0
By SkyFire
QQ:1513008876
E-Mail:skyfireitdiy@hotmail.com
'''


import httplib2
import re
from urllib.parse import urlencode
from bs4 import BeautifulSoup


class OSChinaCrawler:
    '''
    Crawler for the OSChina (oschina.net) project search.

    Public API: ``get_data(keyword)`` -> ``(ok, results)`` where ``results``
    is a list of ``(url, title, intro)`` tuples scraped from every result page.
    '''

    # Browser-like request headers so the site serves the regular HTML pages
    # (previously duplicated verbatim in three different request calls).
    __HEADERS = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'accept-encoding': 'gzip, deflate, sdch',
        'accept-language': 'zh-CN,zh;q=0.8',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'}

    def __init__(self):
        # Per-instance HTTP client with an on-disk cache directory '.Cache'.
        # (Was a class attribute shared by every instance and created at
        # import time; created lazily per instance instead.)
        self.__http_client = httplib2.Http('.Cache')

    def __search_project(self, keyword):
        '''
        Run the initial project search for *keyword*.
        :param keyword: search keyword
        :return: decoded HTML of the first result page, or '' on any failure
        '''
        try:
            data = {'scope': 'project', 'q': keyword}
            response, content = self.__http_client.request(
                'https://www.oschina.net/search?' + urlencode(data),
                headers=self.__HEADERS)
            if response['status'] != '200':
                # BUG FIX: the original fell off the end and returned None
                # here, making the caller's len() raise TypeError.
                return ''
            # The search endpoint answers with a JS redirect; extract the
            # relative target from the `.href="..."` assignment.
            match = re.search(r'\.href="([^"]+)"', content.decode('utf-8'))
            if match is None:  # page layout changed / no redirect found
                return ''
            response, content = self.__http_client.request(
                'https://www.oschina.net/search' + match.group(1),
                headers=self.__HEADERS)
            if response['status'] == '200':
                return content.decode('utf-8')
            return ''
        except Exception:
            # Network/decoding failure: report "no result" rather than crash.
            # (Was a bare `except:` that also swallowed KeyboardInterrupt.)
            return ''

    def __get_pre_line(self, content, line):
        '''
        Return the line immediately before the first occurrence of *line*.
        :param content: newline-separated text to search
        :param line: the line whose predecessor is wanted
        :return: the previous line, or '' when there is none / no match
        '''
        index = content.find(line)
        if index != -1:
            # index-1 is the newline terminating the previous line.
            pre_index = content.rfind('\n', 0, index - 1)
            if pre_index != -1:
                return content[pre_index + 1:index - 1]
        return ''

    def __get_next_line(self, content, line):
        '''
        Return the line immediately after the first occurrence of *line*.
        :param content: newline-separated text to search
        :param line: the line whose successor is wanted
        :return: the next line, or '' when there is none / no match
        '''
        index = content.find(line)
        if index != -1:
            # Skip past the matched line plus its trailing newline.
            index += len(line) + 1
            next_index = content.find('\n', index)
            if next_index != -1:
                return content[index:next_index]
        return ''

    def __get_page_count(self, content):
        '''
        Extract the total page count from the "共 N 页" marker.
        :param content: page content with HTML tags stripped
        :return: page count, or 0 when the marker is absent
        '''
        match = re.search(r'共 ([0-9]+) 页', content)
        # BUG FIX: the original called match.groups() without a None check
        # and raised an uncaught AttributeError when the marker was missing.
        if match is not None:
            return int(match.group(1))
        return 0

    def __get_page_content(self, pagenum, keyword):
        '''
        Fetch one result page of the project search.
        :param pagenum: 1-based page number
        :param keyword: search keyword
        :return: decoded HTML of the page, or '' on any failure
        '''
        try:
            data = {'scope': 'project', 'tag1': 0, 'tag2': 0, 'lang': 0,
                    'os': 0, 'q': keyword, 'p': str(pagenum)}
            response, content = self.__http_client.request(
                'https://www.oschina.net/search?' + urlencode(data),
                headers=self.__HEADERS)
            if response['status'] == '200':
                # Same JS-redirect indirection as in __search_project.
                match = re.search(r'\.href="([^"]+)"', content.decode('utf-8'))
                if match is None:  # redirect not found (layout change)
                    return ''
                response, content = self.__http_client.request(
                    'https://www.oschina.net/search' + match.group(1),
                    headers=self.__HEADERS)
                if response['status'] == '200':
                    return content.decode('utf-8')
            return ''
        except Exception:
            # Narrowed from a bare `except:`; treat failures as empty page.
            return ''

    def get_data(self, keyword):
        '''
        Collect all search results for *keyword*.
        :param keyword: search keyword
        :return: (ok, results) — ok is True when urls/titles/intros line up;
                 results is a list of (url, title, intro) tuples
        '''
        urls = []
        titles = []
        intros = []
        raw_data = self.__search_project(keyword)
        if len(raw_data) != 0:
            no_html = BeautifulSoup(raw_data, 'html.parser').get_text()
            page_count = self.__get_page_count(no_html)
            # (Removed leftover debug prints of page_count / raw_data.)
            for page in range(1, page_count + 1):
                raw_data = self.__get_page_content(page, keyword)
                if len(raw_data) == 0:
                    continue
                no_html = BeautifulSoup(raw_data, 'html.parser').get_text()
                # Project links look like: <h3><a href='https://www.oschina.net/p/NAME'
                temp_urls = re.findall(
                    r'<h3><a href=\'(https://www.oschina.net/p/[a-zA-Z0-9]+)\'',
                    raw_data)
                urls.extend(temp_urls)
                for url in temp_urls:
                    # In the tag-stripped text the title precedes the URL
                    # line and the intro follows it.
                    titles.append(self.__get_pre_line(no_html, url))
                    intros.append(self.__get_next_line(no_html, url))
            count = min(len(urls), len(titles), len(intros))
            ret_list = [(urls[i], titles[i], intros[i]) for i in range(count)]
            return (len(urls) == len(titles) and len(urls) == len(intros)), ret_list
        return False, []

if __name__ == '__main__':
    # Interactive entry point: prompt for a keyword, crawl, dump the results.
    spider = OSChinaCrawler()
    word = input('Key Word:')
    ok, records = spider.get_data(word)
    print(ok)
    print('Find ' + str(len(records)) + ' records')
    for record in records:
        print(record)