"""
五、按以下要求写一个爬虫

1. 从网页 https://gitee.com/explore/recommend?page=1 开始爬取
2. 爬网页中每一个项目的作者的信息
    昵称            user_nick
    用户名          user_name
    用户描述        user_desc
    加入时间        join_time
    追随者          followers
    星的数量        stars
    关注了谁        following
    被查看数        watches
    用户额外信息    user_extra_info
3. 跟踪页面中的“下一页”链接，持续爬取
4. 提供一个命令行参数，用来控制总共爬取的页面数量
5. 避免存放重复的记录
6. 数据存放在Json Lines 中，一行一条记录
"""
import sys
import os

import requests
import json
from lxml import etree


def fetch(url, timeout=10):
    """Download *url* and return the response body as text.

    Returns '' when the response status is not 2xx.

    The new ``timeout`` parameter (default 10 seconds) is backward
    compatible and fixes a real hazard: ``requests.get`` has no timeout
    by default, so a single stalled connection could hang the whole
    crawl forever.
    """
    print('fetching page %s' % url)
    r = requests.get(url, timeout=timeout)
    if r.ok:
        return r.text
    return ''


def parse(tree, base_url, seen_users):
    """Yield one record of author info per project on a listing page.

    tree       : parsed lxml tree of an explore/recommend listing page
    base_url   : site root ('https://gitee.com'), prepended to the
                 relative profile hrefs found on the page
    seen_users : set of user names already emitted; mutated in place so
                 the same author is never fetched or stored twice
                 (requirement 5)

    Stops early (``break``) if a profile page fails to download.
    """
    for href in tree.xpath('//div[@id="git-discover-list"]/div/a[1]/@href'):
        # Dedupe before fetching: the profile URL's last path segment is
        # the user name, so we can skip known users without a request.
        user_name = href.split('/')[-1]
        if user_name in seen_users:
            print('user %s already fetched' % user_name)
            continue
        seen_users.add(user_name)

        profile_url = base_url + href
        html = fetch(profile_url)
        if not html:
            print('failed to fetch page: %s' % profile_url)
            break
        # Parse the profile into its own tree; the original reassigned
        # `tree`, clobbering the listing-page tree mid-iteration.
        user_tree = etree.HTML(html)
        user_nick, user_name = user_tree.xpath('//div[@class="user-info"]/*/text()')
        # BUG FIX: xpath() returns a *list*; the original stored the raw
        # list for user_desc/join_time while every other field was a
        # scalar. Extract the first match, falling back to ''.
        desc = user_tree.xpath('//div[@class="git-user-bio" and position()=1]/span/text()')
        user_desc = desc[0].strip() if desc else ''
        joined = user_tree.xpath('//span[contains(@class, "join-time") and @title]/@title')
        join_time = joined[0] if joined else ''
        counts = user_tree.xpath('//div[@class="git-user-infodata"]//a/div/text()')
        # strip() guards against surrounding whitespace breaking int().
        followers, stars, following, watches = (int(c.strip()) for c in counts)
        yield dict(user_nick=user_nick, user_name=user_name,
                   user_desc=user_desc, join_time=join_time,
                   followers=followers, stars=stars,
                   following=following, watches=watches)


def get_next_url(tree, base_url):
    """Return the absolute URL of the page's "next" pagination link.

    Looks for ``<a rel="next" href=...>`` in *tree*; if one or more
    matches exist, the last href is joined onto *base_url*. Returns
    None when the page has no next link (end of pagination).
    """
    hrefs = tree.xpath('//a[@rel="next"]/@href')
    if hrefs:
        return base_url + hrefs[-1]
    return None


if __name__ == '__main__':
    # Renamed from `help` to avoid shadowing the builtin help().
    def usage():
        print('usage: %s [limit]' % os.path.basename(sys.argv[0]))

    # limit == 0 means "no limit": crawl until there is no next page.
    limit = 0
    if len(sys.argv) == 2:
        try:
            limit = int(sys.argv[1])
        except ValueError:
            print('invalid value of limit')
            usage()
            exit(1)
    elif len(sys.argv) != 1:
        usage()
        exit(1)

    base_url = 'https://gitee.com'
    url = 'https://gitee.com/explore/recommend?page=1'

    processed_page = 0
    seen_users = set()  # shared across pages so records are never duplicated
    # Context manager guarantees the output file is closed on every exit
    # path; the original opened it bare and exit(0) skipped cleanup.
    # encoding='utf-8' makes the output independent of the platform default.
    with open('gitee_users.jl', 'w', encoding='utf-8') as ofile:
        while True:
            print('processing %s' % url)

            html = fetch(url)
            if not html:
                print('failed to fetch page: %s' % url)
                break

            tree = etree.HTML(html)
            for record in parse(tree, base_url, seen_users):
                print('saving record: %s' % str(record))
                # ensure_ascii=False keeps the Chinese nicknames/descriptions
                # human-readable in the JSON Lines file instead of \uXXXX.
                ofile.write(json.dumps(record, ensure_ascii=False) + '\n')
                # Flush per record so a crash mid-crawl loses nothing.
                ofile.flush()

            processed_page += 1
            if limit and processed_page >= limit:
                print('processed %s pages, exit' % processed_page)
                break  # leave the with-block so the file closes cleanly

            url = get_next_url(tree, base_url)
            if not url:
                break