# -*- coding: utf-8 -*-
import urllib2
import json
import time


def get_html(url):
    """Fetch the raw response body for *url*, sending browser-like headers.

    The headers impersonate Chrome on xueqiu.com; without a valid Cookie
    header the site rejects the request.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36',
        'Accept': '*/*',
        'Connection': 'keep-alive',
        'Host': 'xueqiu.com',
        'Referer': 'https://xueqiu.com/p/discover',
        'X-Requested-With': 'XMLHttpRequest',
        # Replace this placeholder with a cookie value copied from the Chrome
        # developer tools, otherwise the request will fail.
        'Cookie': '可从Chrome开发者工具中获取复制出cookie值',
    }
    request = urllib2.Request(url, headers=headers)
    return urllib2.urlopen(request).read()


def fetch_portfolio(code):
    url = 'http://xueqiu.com/p/' + code
    html = get_html(url)
    # 直接用字符匹配找出位置，然后截取字符
    pos_start = html.find('SNB.cubeInfo = ') + 15
    pos_end = html.find('SNB.cubePieData')
    data = html[pos_start:pos_end]
    dic = json.loads(data)

    print '收益率', dic['total_gain']
    stocks = dic['view_rebalancing']['holdings']
    for s in stocks:
        print s['stock_name'], s['weight']


def get_portfolio_list(page):
    url = 'https://xueqiu.com/cubes/discover/rank/cube/list.json?category=10&count=10&market=cn&page=%d' % page
    html = get_html(url)
    dic = json.loads(html)
    for p in dic['list']:
        print p['symbol'], p['name']
        # 结果存储添加至全局变量中
        portfolio_list.append((p['symbol'], p['name']))


# 抓取组合列表
global portfolio_list
portfolio_list = []
for page in xrange(1, 3):  # 需要抓取更多页，修改这里的page上限
    print 'fetch page', page
    get_portfolio_list(page)
    time.sleep(2)
# print portfolio_list
print '============================'

# 抓取每个组合的详细信息
for p in portfolio_list:
    print 'fetch', p[0], p[1]
    fetch_portfolio(p[0])
    time.sleep(1)
    print '---------------------------'

