#! -*- coding:utf-8 -*-

import math
import numpy as np
from numpy import *
import matplotlib.pyplot as plt
import sys, os, time, uuid, re, codecs
import urllib2
import chardet
from bs4 import BeautifulSoup
from pymongo import MongoClient
import cPickle

def gethtml(url):
    """Fetch *url* and return the raw response body, undecoded.

    The previous version ran chardet over the response and looked up the
    system encoding, but never applied either result (the decode call was
    commented out) and printed the chardet dict as debug noise — all dead
    code, removed here. Callers receive the raw bytes exactly as before.
    """
    # headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
    # req = urllib2.Request(url=url, headers=headers)
    content = urllib2.urlopen(url).read()
    return content


# 下载数据并保存到本地
def download(pz):
    url = "http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesDailyKLine?symbol="+pz
    # http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesDailyKLine?symbol=M0
    # 一般截止到前一日(json格式):日期,开盘，最高，最低，收盘。成交量
    datastr = gethtml(url)
    data_list = list(datastr)
    datalenth = len(data_list)
    print "len:", datalenth
    write_file = open('./data/'+pz+'.pkl', 'wb')
    cPickle.dump(datastr, write_file, -1)
    write_file.close()
    return

def get60minutes():
    url = "http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesMiniKLine60m?symbol=RU0"
    # http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesDailyKLine?symbol=M0
    # 一般截止到前一日(json格式):日期,开盘，最高，最低，收盘。成交量
    datastr = gethtml(url)
    data_list = list(datastr)
    datalenth = len(data_list)
    print "len:", datalenth
    write_file = open('./data/' + 'RU60t' + '.pkl', 'wb')
    cPickle.dump(datastr, write_file, -1)
    write_file.close()

def getIndex(prices, time):
    """Return every row index in *prices* whose first field (the date
    string) equals *time*; empty list when no row matches."""
    matches = []
    for pos, row in enumerate(prices):
        if row[0] == time:
            matches.append(pos)
    return matches

def getData(pz):
    """Load the cached raw K-line string for *pz* from ./data/<pz>.pkl
    and parse it into a Python list of rows
    (date, open, high, low, close, volume — up to the previous session).

    Fix: file handle now closed via `with` even if unpickling raises.
    """
    with open('./data/'+pz+'.pkl', 'rb') as read_file:
        data = cPickle.load(read_file)
    # SECURITY NOTE(review): eval() on content downloaded from the web —
    # kept because the cached payload is assumed trusted, but json.loads
    # would be the safe replacement if the feed is valid JSON.
    data = eval(data)
    return data

#获取某种类的日线数据：日期,开盘，最高，最低，收盘。成交量
def getPrices(kind):
    read_file = open('./data/'+kind+'.pkl', 'rb')
    data = cPickle.load(read_file)
    # label = cPickle.load(read_file)
    read_file.close()
    # http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesDailyKLine?symbol=M0
    # 一般截止到前一日(json格式):日期,开盘，最高，最低，收盘。成交量
    data = eval(data)
    #mat = np.matrix(data)
    data = [[1.0, float(inner[1]), float(inner[2]), float(inner[3]), float(inner[4])] for inner in   data]  #:日期,开盘，最高，最低，收盘。成交量
    return data

#5/10/20/40日均线
def getMA(mat,len,index):
    data=[]
    for i in range(len):
        data.append(mat[index][4])
    start=0
    if index<len:
        start=len-index
    for i in range(start,len):
        data[i]=mat[index-i][4]
    data=np.matrix(data)
    #print data
    return np.mean(data)


#获取测试矩阵数据，ma5/ma8/ma14/ma20的数据
def getTrainingData(kind):
    prices = getPrices(kind)
    #print "stratery type:",type(prices)
    #data.reverse()
    cls = []
    ma5 = []
    ma8 = []
    ma14 = []
    ma20 = []
    total = []
    lens = len(prices) #1000
    for i in range(lens ):
        cls.append(prices[i][4])
        ma5.append(getMA(prices, 5, i))
        ma8.append(getMA(prices, 8, i))
        ma14.append(getMA(prices, 14, i))
        ma20.append(getMA(prices, 20, i))
        total.append([getMA(prices, 5, i), getMA(prices, 8, i), getMA(prices, 14, i), getMA(prices, 20, i)])
    #print total
    return (prices,total)


if __name__ == "__main__":
    # Script entry point: uncomment the symbols you want to (re)download.
    print("download data from sina...")
    #download("L0")
    #download("P0")