# coding:utf-8
import requests
from lxml import etree
from modules import init_db
from BoolmFilter.memory_filter import MemoryFilter


# num = sys.argv[1]


class GetHelloGithubPython:
    """Scrape the HelloGithub "Python" category listing pages and store
    previously-unseen projects in MongoDB.

    NOTE(review): relies on module-level globals ``db_hello_github``
    (a Mongo collection) and ``mem_filter`` (provides ``_get_hash_value``)
    being initialised by the script entry point before ``crawl()`` runs.
    """

    def __init__(self, num):
        # num: one past the last page number to fetch (pages 1 .. num-1).
        self.num = num
        self.url = "https://hellogithub.com/periodical/category/Python%20%E9%A1%B9%E7%9B%AE/?page="

    def get_content(self, content):
        """Parse one listing page's HTML and insert new projects.

        content: raw HTML text of a single listing page.
        """
        html = etree.HTML(content)
        # Parallel lists: one entry per project card on the page.
        titles = html.xpath('//*[@id="main"]/div[4]/h2[*]/a[2]/text()')
        periods = html.xpath('//*[@id="main"]/div[4]/p[*]/span/strong/i[1]/text()')
        stars = html.xpath('//*[@id="main"]/div[4]/p[*]/span/strong/i[2]/text()')
        watches = html.xpath('//*[@id="main"]/div[4]/p[*]/span/strong/i[3]/text()')
        forks = html.xpath('//*[@id="main"]/div[4]/p[*]/span/strong/i[4]/text()')
        urls = html.xpath('//*[@id="main"]/div[4]/h2[*]/a[2]/@href')
        descriptions = html.xpath('//*[@id="main"]/div[4]/p[*]/br[1]')

        new_docs = []
        for title, period, star, watch, fork, url, desc in zip(
                titles, periods, stars, watches, forks, urls, descriptions):
            # The href is a click-tracking redirect; strip it down to the
            # real project URL.
            new_url = url.replace('/periodical/statistics/click/?target=', '')
            data = {
                'title': title,
                'period': period,
                'star': star,
                'watch': watch,
                'fork': fork,
                'url': new_url,
                # desc.tail may be None for an empty description; guard so
                # we don't store the literal string "None".
                'content': (desc.tail or '').strip(),
                'url_hash': mem_filter._get_hash_value(new_url),
            }
            if db_hello_github.find_one({'url_hash': data['url_hash']}):
                print('have repeat data: ', new_url)
            else:
                new_docs.append(data)
        if new_docs:
            db_hello_github.insert_many(new_docs)

    def crawl(self):
        """Fetch pages 1 .. num-1 and feed each page to get_content."""
        for page in range(1, self.num):
            page_url = self.url + str(page)
            # A timeout keeps an unresponsive server from hanging the run.
            response = requests.get(page_url, timeout=30)
            self.get_content(response.text)


if __name__ == '__main__':
    # Wire up the module-level collaborators that GetHelloGithubPython
    # reads by name: the Mongo collection and the URL-hash filter.
    db = init_db()
    db_hello_github = db['hello_github']
    mem_filter = MemoryFilter()

    # Crawl listing pages 1..49 of the Python category.
    crawler = GetHelloGithubPython(50)
    crawler.crawl()
