class Scan < ActiveRecord::Base
  has_many :results, :dependent => :destroy

  # status legend:
  #   nil => legacy scan, 0 => new, 1 => read complete, 2 => save complete, 3 => error

  # Crawls the bilibili XML listing API for every category in +ac_types+,
  # caching each page of results to disk via #to_cache.  Pages 2..n of a
  # category are fetched by up to two concurrent worker threads.
  # Returns true.
  def search ac_types
    url = "http://api.bilibili.tv/list?type=xml&appkey=#{Work.get_info_key}&order=default&pagesize=100"
    @http = Net::HTTP::Persistent.new(nil, (PROXY ? URI.parse(PROXY) : nil))
    @protect_num = 0 # request counter used by search_again_and_again to throttle and avoid an IP ban
    ac_types.each do |ac_type|
      date_now =  Time.now
      head_url = "#{url}&tid=#{ac_type.page_code}&page="
      # Re-fetch page 1 until the API reports a non-zero page count.
      first_page = search_again_and_again("#{head_url}1")
      page_count = (first_page / "pages").text.to_i
      while page_count.zero?
       first_page = search_again_and_again("#{head_url}1")
       page_count = (first_page / "pages").text.to_i
      end
      to_cache first_page/("list/data"), ac_type.id, 1

      # print progress in the console
        puts "read #{ac_type.page_name} pages(#{page_count})."
        print "|"
      #end
      th_count = 2 # free worker slots; decremented on launch, incremented by the worker on finish
      threads = ThreadGroup.new     
      (2..page_count).each do |i|
        tmp_url = "#{head_url}#{i}"
        # NOTE: the `if` expression below is the *condition* of this `while`
        # (the loop body is empty).  It evaluates to true (sleep and re-check)
        # while no worker slot is free, and to false (exit the wait loop)
        # once a worker thread has been launched for page i.
        while
          if th_count > 0
            th_count -= 1
            threads.add(Thread.new(i){ |i0|
              to_cache list_again_and_again(tmp_url), ac_type.id, i0
              # progress marker every 5 pages, "|" every 100 pages
              print (i0 % 100).zero? ? "|" : "." if (i0 % 5).zero?
              # NOTE(review): th_count is mutated from worker threads without a
              # mutex; MRI's GVL makes this benign in practice — confirm if the
              # runtime ever changes.
              th_count += 1
            })
            false
          else
            sleep 1
            true
          end
        end
      end
      # Wait for all workers of this category to drain.
      while threads.list.select{|th| th.alive?}.size > 0
        print %(#{threads.list.select{|th| th.alive?}.size}waits...   ) if threads.list.select{|th| th.alive?}.size > 1
        sleep 2
      end
      # NOTE(review): the connection is shut down INSIDE the per-category loop,
      # so later categories start with a closed connection and only recover via
      # restart_http in search_again_and_again's rescue path — presumably this
      # was meant to sit after the loop; confirm before moving it.
      @http.shutdown
      print "#{Time.now - date_now} second complete!\n"
    end
    true
  end
  
  # Imports this scan's cached YAML pages into the database: upserts Work
  # rows, records one Result per entry, marks the scan saved (status 2),
  # then removes the consumed cache files.
  #
  # work_update - when true, existing works are refreshed even when their
  #               pic_path and cdate are already filled in.
  def result_process work_update=false
    # Gather cached entries: files under scancache/ named "<scan_id>_*".
    require 'find'
    require 'fileutils'
    ary = []
    print "reading files..."
    RAILS_DEFAULT_LOGGER.fatal "reading files..."
    Find.find(Rails.root + 'scancache') do |file|
      next unless file.class == String
      next unless File.file?(file) # skip the scancache directory entry itself
      # The cache files are produced by this app (#to_cache), so a full
      # YAML.load (symbol keys) is acceptable here.
      ary += YAML.load(File.read(file)) if Pathname.new(file).basename.to_s.split('_').first.to_i == self.id
    end
    puts "reading complete. #{ary.size} works found."
    RAILS_DEFAULT_LOGGER.fatal "reading complete. #{ary.size} works found."
    begin
      Scan.transaction do
        ary.each do |res|
          work = Work.find_by_wid res[:wid]
          if work
            # Only touch existing rows when forced or when key fields are missing.
            work.update_attributes :cdate => res[:cdate],
                                   :author_name => res[:author_name], :ac_type_id => res[:ac_type_id],
                                   :pic_path => res[:pic_path] if work_update || work.pic_path.blank? || work.cdate.blank?
          else
            begin
              work = Work.create! :name => res[:name],
                                  :wid => res[:wid],
                                  :cdate => res[:cdate],
                                  :ac_type_id => res[:ac_type_id],
                                  :author_name => res[:author_name],
                                  :pic_path => res[:pic_path]
            rescue
              # Previously `work = Work.create! ... rescue p(...)` left `work`
              # as p's return value (the message String on Ruby >= 1.9), and
              # the subsequent work.id call aborted the whole transaction.
              # Now a bad entry is logged and skipped.
              p "#{res[:wid]} can't create"
              next
            end
          end
          Result.create! :work_id => work.id,
                         :scan_id => self.id,
                         :clicks => res[:clicks],
                         :comments => res[:comments],
                         :danmu => res[:danmu],
                         :stows => res[:stows],
                         :tj => res[:tj],
                         :yb => res[:yb]
        end
      end
      update_attribute :status, 2
      # Delete this scan's cache files without spawning a shell.
      FileUtils.rm_rf Dir.glob("#{Rails.root}/scancache/#{self.id}_*")
      puts "done"
      RAILS_DEFAULT_LOGGER.fatal "done"
    rescue
      p $!, $@
    end
  end

  # Fetches a single listing page for +ac_type+ and returns the parsed
  # document root (same shape as #search_again_and_again's return value).
  # Retries on timeout/503/empty body, giving up after 10 attempts.
  #
  # NOTE(review): the original body invoked the *instance* method
  # search_again_and_again from this class method (NoMethodError at runtime)
  # and passed the connection where the retry counter was expected; the
  # request/retry loop is now inlined here, and the local persistent
  # connection — previously leaked — is shut down on exit.
  def self.search_single ac_type, page_num
    require 'net/http/persistent'
    http = Net::HTTP::Persistent.new(nil, (PROXY ? URI.parse(PROXY) : nil))
    url = "http://#{BILI_PATH}/video/#{ac_type.page_name}-#{page_num}.html"
    attempts = 0
    begin
      re = Timeout.timeout(10, TimeoutError) { http.request(URI.parse(url)) }
      raise if re.code == '503'
      raise if re.plain_body.empty?
      Nokogiri::XML(re.plain_body, nil, 'utf-8').root
    rescue
      attempts += 1
      raise if attempts >= 10
      sleep 3
      retry
    ensure
      http.shutdown
    end
  end

  # Convenience wrapper: fetch one listing page for +ac_type+ and record
  # its counters as a Result via #add_results.
  def search_single_and_add ac_type, page_num
    page = Scan.search_single(ac_type, page_num)
    add_results page
  end
  
  # Creates or refreshes the Work described by one parsed listing node.
  #
  # listpg      - Nokogiri node for a single listing entry.
  # ac_type_id  - id of the category the entry belongs to.
  # work_update - when true, force-refresh existing works even when their
  #               pic_path and cdate are already filled in.
  #
  # Returns the Work, or nil when creation failed.
  def self.work_info_fixed listpg, ac_type_id, work_update=false
    title_a = listpg.css("a.title").first
    wid = Scan.get_wid title_a
    name = title_a.text
    cdate_div = listpg.css("div.date")
    cdate = cdate_div.search("a").last.text
    author_name = cdate_div.search("a").first.text
    pic_path = listpg.search("img").first.get_attribute("src")
    work = Work.find_by_wid wid
    if work
      # Only touch existing rows when forced or when key fields are missing.
      work.update_attributes :cdate => cdate,
                             :author_name => author_name,
                             :ac_type_id => ac_type_id,
                             :pic_path => pic_path if work_update || work.pic_path.blank? || work.cdate.blank?
    else
      begin
        work = Work.create! :name => name,
                            :wid => wid,
                            :cdate => cdate,
                            :ac_type_id => ac_type_id,
                            :author_name => author_name,
                            :pic_path => pic_path
      rescue
        # Previously `rescue p(...)` left `work` holding p's return value
        # (the message String on Ruby >= 1.9); return nil explicitly so
        # callers can test for failure.
        p "#{wid} can't create"
        work = nil
      end
    end
    work
  end
  
  # Records a Result row for the work shown in +listpg+ (a parsed HTML
  # listing fragment).  Does nothing when the work is not known yet.
  def add_results listpg
    anchor = listpg.css("a.title").first
    work = Work.find_by_wid(Scan.get_wid(anchor))
    return unless work

    info = listpg.css("div.w_info")
    Result.create! :work_id => work.id,
                   :scan_id => self.id,
                   :clicks => info.css("a.gk").text,
                   :comments => info.css("a.pl").text,
                   :danmu => info.css("a.dm").text,
                   :stows => info.css("a.sc").text,
                   :tj => info.css("a.pf").text,
                   :yb => info.css("a.yb").text
  end
  
  # GETs +url+ over the shared persistent connection (@http) and returns
  # the parsed XML document root.  Any failure — timeout, HTTP 503, empty
  # body, parse/network error — recycles the connection and retries.
  #
  # NOTE(review): the retry recursion is unbounded, so a permanently dead
  # endpoint will eventually overflow the stack; bounding it would change
  # current behaviour, so it is only flagged here.
  def search_again_and_again url, count = 0
    print "again~#{count}" unless count == 0
    # Crude anti-ban throttle: pause briefly after every 10 requests.
    if @protect_num > 9
      sleep 0.5
      @protect_num = 0
    end
    re = Timeout.timeout(10, TimeoutError) { @http.request(URI.parse(url)) }
    @protect_num += 1
    raise if re.code == '503'
    raise if re.plain_body.empty?
    Nokogiri::XML(re.plain_body, nil, 'utf-8').root
  rescue
    # Timeout::Error < StandardError, so the former dedicated
    # `rescue TimeoutError` branch (whose body was identical) is folded
    # into this single handler.
    restart_http
    search_again_and_again(url, count + 1)
  end

  # Fetches +url+ until its <list><data> entries are non-empty, raising a
  # RuntimeError after 100 empty responses.
  def list_again_and_again url, count = 0
    raise if count == 100
    entries = search_again_and_again(url) / ('list/data')
    return entries unless entries.empty?
    list_again_and_again(url, count + 1)
  end

  # Serialises one page of parsed listing entries to a YAML cache file named
  # "<scan_id>_<ac_type_id>_<page>" under Rails.root/scancache/, later
  # consumed by #result_process.
  def to_cache listpgs, ac_type_id, page
    rows = listpgs.map do |entry|
      { :ac_type_id => ac_type_id,
        :wid => (entry / "aid").text,
        :mid => (entry / "mid").text,
        :name => (entry / "title").text,
        :cdate => (entry / "create").text,
        :author_name => (entry / "author").text,
        :pic_path => (entry / "pic").text,
        :clicks => (entry / "play").text,
        :comments => (entry / "review").text,
        :danmu => (entry / "video_review").text,
        :stows => (entry / "favorites").text,
        :tj => (entry / "credit").text,
        :yb => (entry / "coins").text }
    end
    cache_file = Rails.root + "scancache/" + "#{self.id}_#{ac_type_id}_#{page}"
    File.open(cache_file, "w") do |io|
      io.write rows.to_yaml
    end
  end

  # Extracts the work id from a listing anchor's href.
  # Prefers the numeric "/av<id>" path segment (returned as an Integer);
  # falls back to everything after "id=" (returned as a String) when the
  # path form is absent.
  def self.get_wid title_a
    href = title_a.get_attribute("href")
    path_id = href.split('/av')[1].to_i
    return path_id unless path_id.zero?
    href.split('id=').last
  end

  # Drops the shared persistent connection and opens a fresh one after a
  # short cool-down; used by the retry path when a request fails.
  def restart_http
    @http.shutdown
    sleep 3
    proxy = PROXY ? URI.parse(PROXY) : nil
    @http = Net::HTTP::Persistent.new(nil, proxy)
  end
end
