import requests
from urllib.parse import urlencode
from bs4 import BeautifulSoup
import json,traceback
import find_cn
import re
from multiprocessing import Pool
from functools import partial
import get_page
def _parse_star_count(text):
    """Parse a GitHub star-count label like '342', '1.5k' or '12k' into an int.

    The previous inline version mangled decimals ('1.5k' -> 15000 instead of
    1500); here the numeric part is parsed as a float and scaled by 1000 only
    when a 'k' suffix is present.

    Raises ValueError if no number is found in *text*.
    """
    match = re.search(r'[\d.]+', text)
    if not match:
        raise ValueError('no star count in {!r}'.format(text))
    value = float(match.group())
    if 'k' in text.lower():
        value *= 1000
    return int(value)

def get_data(page, keyword):
    """Scrape one page of GitHub repository search results.

    Searches GitHub for *keyword* (language filter: Python) on result page
    *page*, keeps repositories with more than 50 stars whose README contains
    Chinese text (checked via find_cn.find_cn), and returns them as a list of
    dicts with keys 'star_num', 'link' and 'content'.

    Returns an empty list when the page cannot be fetched or parsed.
    """
    print('page  ', page)
    data_list = []
    # Build the search URL, e.g. https://github.com/search?l=Python&q=a&p=3
    paras = {
        'l': 'Python',
        'q': keyword,
        'p': page
    }
    url_base = 'https://github.com/search?'
    url = url_base + urlencode(paras)

    # Fetch the page; get_page returns a falsy value on failure.
    resp = get_page.get_page(url)
    if not resp:
        return []

    soup = BeautifulSoup(resp.text, 'lxml')

    # Locate the result list; missing list means an empty/blocked page.
    containers = soup.find_all('ul', 'repo-list')
    try:
        repo_list = containers[0].find_all('div', 'repo-list-item')
    except IndexError:
        # Narrowed from a bare except: only "no <ul class=repo-list>" is expected here.
        print('page ', page, ' error')
        return data_list

    for item in repo_list:
        text_gray = str(item.find('div', 'text-gray').get_text())

        # Skip results whose description block is not marked as Python.
        if 'Python' not in text_gray:
            continue

        try:
            star_text = str(item.find('a', 'muted-link').get_text())
            num = _parse_star_count(star_text)
            if num > 50:  # keep repositories with more than 50 stars
                link = 'https://github.com' + item.find('a').attrs['href']
                content = find_cn.find_cn(link)
                if content:  # README contains Chinese text
                    temp = {
                        'star_num': str(num),
                        'link': link,
                        'content': content,
                    }
                    data_list.append(temp)
                    print(temp)
        except Exception:
            # Log the full traceback to b.txt; 'with' closes the file
            # (the original leaked the handle passed to print_exc).
            with open("b.txt", "a", encoding='utf-8') as err_file:
                traceback.print_exc(file=err_file)
            print('error in b.txt')
    return data_list

if __name__ == '__main__':

    # Search keyword; results are written to '<target>.json'.
    target = 'a'

    # Fan out pages 1..87 across 5 worker processes. The context manager
    # terminates the pool on exit (the original Pool was never closed/joined).
    with Pool(5) as pool:
        partial_work = partial(get_data, keyword=target)
        results = pool.map(partial_work, range(1, 88))

    # Flatten the per-page lists into a single list of repo dicts.
    data = [entry for page_data in results for entry in page_data]

    with open('{}.json'.format(target), 'w', encoding='utf-8') as file:
        json.dump(data, file, ensure_ascii=False)
    print('OK')