# -*- coding: utf-8 -*-
import hashlib
import md5

import pandas as pd
import scrapy
from sqlalchemy.exc import IntegrityError

from miracledata.investdb import engine
from miracledata.models import Headline
from miracledata.url import URL, URLError
from miracledata.web import db


# URL template for sina.com.cn's per-stock bulletin list; {sid} is the
# stock id and {n} the page number.
urls = URL.from_template(
    "http://vip.stock.finance.sina.com.cn/corp/view/"\
    "vCB_AllBulletin.php?stockid={sid}&Page={n}"
)

# Stock id to crawl.
use_sid = "600"

# Enqueue pages 0-19 of the bulletin list for this stock.
# ``range`` behaves identically to the old ``xrange`` here and also works
# on Python 3, where ``xrange`` no longer exists.
urls.create_many([{"sid": use_sid, "n": n} for n in range(20)])

# Turnover history for this stock, indexed by date; used later to attach
# the daily change ("chg") to each scraped headline.
# NOTE: ``use_sid`` is a module constant, not user input, so the string
# formatting below is not an injection risk; switch to a bound parameter
# if the sid ever comes from an external source.
sql = "select * from turnover where sid='{sid}';".format(sid=use_sid)
df = pd.read_sql(sql, engine, index_col='date')

class ReaderSpider(scrapy.Spider):
    """Scrape the headline announcements of one stock from sina.com.cn,
    join each headline with that day's turnover change, and persist the
    result as a ``Headline`` row.
    """
    name = "reader"
    start_urls = urls

    def parse(self, response):
        """Extract (date, title) pairs from one bulletin-list page and
        store each as a ``Headline``; rows whose md5 key already exists
        are skipped via the IntegrityError rollback.

        :param response: scrapy response for one bulletin-list page.
        """
        dates = response.xpath('//div[@class="datelist"]/ul/text()[following-sibling::br]')
        titles = response.xpath('//div[@class="datelist"]/ul/a/text()')
        for date, title in zip(dates, titles):
            date_ = date.extract().strip()
            title_ = title.extract().strip()

            # De-duplication key: md5 of the gb2312-encoded title.
            # ``hashlib.md5`` produces the same digest as the removed
            # Python-2-only ``md5.new`` and works on both versions.
            # NOTE(review): gb2312 cannot encode every character; a title
            # containing characters outside that charset would raise
            # UnicodeEncodeError -- confirm that cannot happen here.
            key = hashlib.md5(title_.encode("gb2312")).hexdigest()

            row = {}
            row["key"] = key
            row["date"] = date_
            row["sid"] = use_sid
            row["content"] = title_

            # Attach that day's turnover change; default to 0.00 when the
            # date is absent from the turnover table.
            # ``DataFrame.get_value`` was removed in pandas 1.0; ``.at``
            # is the supported scalar accessor and raises KeyError for a
            # missing label just as ``get_value`` did.
            try:
                chg = df.at[date_, "chg"]
            except KeyError:
                chg = 0.00
            row["chg"] = round(chg, 2)

            try:
                db.session.add(Headline(**row))
                db.session.commit()
            except IntegrityError:
                # Duplicate key -> this headline is already stored.
                db.session.rollback()





