# -*- coding: utf-8 -*-
import hashlib
import md5

import pandas as pd
import scrapy
from scrapy.http import Request
from scrapy.linkextractors import LinkExtractor
from sqlalchemy import text
from sqlalchemy.exc import IntegrityError

from miracledata.models import EMComment
from miracledata.web import db
from miracledata.investdb import engine

# Stock id whose forum ("guba") threads are scraped by this spider.
use_sid = "000800"

# Listing-page URL template: {sid} = stock id, {n} = 1-based page number.
urlx = "http://guba.eastmoney.com/list,{sid}_{n}.html"
# First listing page; consumed below as the spider's start URL.
url = urlx.format(sid=use_sid, n=1)

class EmgubaSpider(scrapy.Spider):
    """Scrape forum threads for one stock from guba.eastmoney.com.

    For every thread on each listing page this records read/comment counts,
    title and timestamps, follows the thread link to pick up its publish
    date, joins in that day's price change ("chg") from the local
    ``turnover`` table, and persists one EMComment row per thread.
    """
    name = "emguba"
    allowed_domains = ["eastmoney.com"]
    start_urls = (url,)

    def parse(self, response):
        """Parse one listing page.

        Yields one Request per thread (detail page, handled by
        parse_publish_date) and, when not on the last page, one Request
        for the next listing page.
        """
        # pager payload looks like: "list,002091_|15790|80|3"
        #   -> prefix | total item count | page size | current page (1-based)
        current_page = \
            response.xpath('//span[@class="pagernums"]/@data-pager').extract()[0]
        for div in response.xpath('//div[@class="articleh"]'):
            try:
                # sample row values: ['34319', '182', ' ', '11-17', '11-17 16:00']
                read_count = div.xpath('./span[@class="l1"]/text()').extract()[0]
                comment_count = div.xpath('./span[@class="l2"]/text()').extract()[0]
                last_update = div.xpath('./span[@class="l5"]/text()').extract()[0]
                comment_title = div.xpath('./span/a/@title').extract()[0]
                news_url = div.xpath('./span[@class="l3"]/a/@href').extract()[0]
            except IndexError:
                # ad/announcement rows lack some spans -- deliberately skip them
                continue

            row = {
                # md5 of the title de-duplicates threads across runs
                # (hashlib.md5 replaces the deprecated Py2 `md5` module,
                # same hexdigest)
                "key": hashlib.md5(comment_title.encode('utf-8')).hexdigest(),
                "sid": use_sid,
                "title": comment_title,
                "read_count": int(read_count),
                "comment_count": int(comment_count),
                "last_update": last_update,
            }
            yield Request(url=response.urljoin(news_url),
                          meta={'row': row},
                          callback=self.parse_publish_date)

        # schedule the next listing page, if we are not on the last one
        items = current_page.split("|")
        count = int(items[1])
        size = int(items[2])
        current = int(items[3])
        # ceiling division: pages needed to hold `count` items, `size` per page
        total = -(-count // size)
        if current < total:
            # NOTE: the original accidentally chain-assigned a local `url`
            # here (`next_url = url=...`), shadowing the module constant;
            # keep only the intended binding.
            next_url = urlx.format(sid=use_sid, n=current + 1)
            print(next_url)
            yield Request(url=next_url, callback=self.parse)

    def parse_publish_date(self, response):
        """Parse a thread detail page for its publish date/time, attach the
        day's price change from the `turnover` table, and persist the row.

        Returns the completed row dict (also stored as an EMComment).
        """
        # timestamp line splits into [label, 'YYYY-MM-DD', 'HH:MM:SS']
        wordstr = response.xpath("//div[@class='zwfbtime']/text()").extract()[0]
        words = wordstr.split(' ')
        row = response.meta['row']
        row["publish_date"] = words[1]
        row["publish_time"] = words[2]

        # Parameterized query: `words[1]` is scraped from an external page,
        # so it must never be interpolated into the SQL string (the original
        # str.format() version was injectable).
        record = engine.execute(
            text("select chg from turnover where sid=:sid and date=:date"),
            sid=use_sid, date=words[1])
        # No turnover record for that date -> treat price change as flat.
        row["chg"] = 0.0 if record.rowcount == 0 else record.first()[0]

        comment = EMComment(**row)
        try:
            db.session.add(comment)
            db.session.commit()
        except IntegrityError:
            # duplicate key (same title hash) -- row already stored, ignore
            db.session.rollback()
        return row