# -*- coding: utf-8 -*-
from scrapy import log
import logging
from scrapy.log import ScrapyFileLogObserver
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from vndirect.items import *
from scrapy.http import Request, Response
from scrapy.http.cookies import CookieJar


class VnDirectSpider(BaseSpider):
    """Scrape owner-equity figures (Q4, fiscal years 2003-2013) for every
    ticker listed in ids.txt from vndirect.com.vn balance-sheet pages.

    Output: one CSV per ticker under owner_equity/ plus an aggregate
    all-2013.csv shared by all callbacks.
    """
    name = "vndirect"
    allowed_domains = ["vndirect.vn"]

    # Aggregate CSV handle; opened in start_requests(), written by parse(),
    # released in closed() once the crawl finishes.
    f = None

    def __init__(self):
        # Keep BaseSpider's own initialization (name/start_urls handling).
        super(VnDirectSpider, self).__init__()
        # Mirror all scrapy log output into a local file for debugging.
        logfile = open('testlog.log', 'w')
        log_observer = ScrapyFileLogObserver(logfile, level=logging.DEBUG)
        log_observer.start()  # start logging

    def start_requests(self):
        """Yield one Request per (ticker, year) pair read from ids.txt."""
        with open("ids.txt") as ids_file:
            ids = [line.strip() for line in ids_file]

        temp_url = "https://www.vndirect.com.vn/portal/bang-can-doi-ke-toan/%s.shtml?searchObject.fiscalQuarter=Q4&searchObject.fiscalYear=%d&searchObject.moneyRate=1,000&searchObject.numberTerm=4&&request_locale=en_GB"

        self.f = open('all-2013.csv', 'w')
        for ticker in ids:
            if ticker == '':
                continue
            # Years 2013 down to 2003 inclusive.
            for yr in range(2013, 2002, -1):
                yield Request(temp_url % (ticker, yr), self.parse)
        # NOTE: do NOT close self.f here. This generator is fully consumed
        # before any response callback runs, so closing now would make every
        # later self.f.write() in parse() fail. See closed() below.

    def closed(self, reason):
        """Scrapy shutdown hook: release the aggregate CSV handle."""
        if self.f is not None:
            self.f.close()
            self.f = None

    def parse(self, response):
        """Extract the owner-equity row from a balance-sheet page and append
        it to both the per-ticker CSV and the aggregate file."""
        url = response.url

        # Recover fiscal year and ticker back out of the request URL.
        marker = "fiscalYear="
        start = url.find(marker) + len(marker)
        yr = url[start:start + 4]
        pos1 = url.find("toan/")
        pos2 = url.find(".shtml")
        ticker = url[pos1 + 5:pos2]

        hxs = HtmlXPathSelector(response)
        items = hxs.select("//div[contains(.,'Investment Capital of Owners Equity ')]/../../td[position()>1]/div/text()").extract()
        if not items:
            print("no item here")
            return

        # First four cells = the four quarterly/term values; strip thousands
        # separators so the CSV stays numeric.
        owner_items = [x.replace(',', '').strip() for x in items][0:4]
        res = "%s,%s,%s\n" % (ticker, yr, ",".join(owner_items))

        # Append mode: each (ticker, year) arrives in a separate response.
        # Opened only after the empty-items check so no handle leaks on the
        # early return above.
        with open("owner_equity//%s.csv" % ticker, 'a') as per_ticker:
            per_ticker.write(res)
        self.f.write(res)
        
        
        
