
How do I split the output per URL from a list of URLs in Scrapy?

I want to generate one CSV file for each scraped URL from a list of URLs in Scrapy. I understand that I need to modify pipeline.py, but all my attempts have failed. I don't see how I can pass the URL being scraped to the pipeline, use it as the name of the output file, and split the output accordingly.

Any help?

Thanks

Here are the spider and the pipeline:

from scrapy import Spider
from scrapy.selector import Selector
from vApp.items import fItem


class VappSpider(Spider):

    name = "vApp"
    allowed_domains = ["google.co.uk"]
    start_urls = [l.strip() for l in open('data/listOfUrls.txt').readlines()]

    def parse(self, response):
        trs = Selector(response).xpath('//*[@id="incdiv"]/table/tbody/tr')
        for tr in trs:
            item = fItem()

            try:
                item['item'] = tr.xpath('td/text()').extract()[0]
            except IndexError:
                item['item'] = 'null'

            yield item

The pipeline:

from scrapy import signals
from scrapy.contrib.exporter import CsvItemExporter


class VappPipeline(object):
    def __init__(self):
        self.files = {}

    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        return pipeline

    def spider_opened(self, spider):
        file = open('results/%s.csv' % spider.name, 'w+b')
        self.files[spider] = file
        self.exporter = CsvItemExporter(file)
        self.exporter.fields_to_export = ['item']
        self.exporter.start_exporting()

    def spider_closed(self, spider):
        self.exporter.finish_exporting()
        file = self.files.pop(spider)
        file.close()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item

Answer


I think you should do all this in bulk as a post-processing step when your crawl finishes, rather than per item, but here is a draft of how to do what you want:

from scrapy import Spider
from scrapy.selector import Selector
from vApp.items import fItem


class VappSpider(Spider):

    name = "vApp"
    allowed_domains = ["google.co.uk"]
    start_urls = [l.strip() for l in open('data/listOfUrls.txt').readlines()]

    def parse(self, response):
        trs = Selector(response).xpath('//*[@id="incdiv"]/table/tbody/tr')
        for tr in trs:
            item = fItem()

            try:
                item['item'] = tr.xpath('td/text()').extract()[0]
            except IndexError:
                item['item'] = 'null'
            # record which URL this row came from so the pipeline can split on it
            item['url'] = response.url
            yield item
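
This assumes the item class declares a url field. A minimal sketch of what vApp/items.py might look like (the fItem name comes from the question; the url field is the addition):

# vApp/items.py -- sketch, assuming this is where fItem is defined
from scrapy import Item, Field

class fItem(Item):
    item = Field()
    url = Field()  # added so the spider can attach response.url to each row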


from scrapy import signals
from scrapy.contrib.exporter import CsvItemExporter
from urlparse import urlparse


class VappPipeline(object):
    def __init__(self):
        self.files = {}
        self.exporter = {}

    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        return pipeline

    def process_item(self, item, spider):
        url = item['url']
        parsed_uri = urlparse(url)
        domain = parsed_uri.netloc
        # open one file/exporter per domain the first time we see it
        if domain not in self.exporter:
            file = open('results/%s.csv' % domain, 'w+b')
            self.files[domain] = file
            self.exporter[domain] = CsvItemExporter(file)
            self.exporter[domain].fields_to_export = ['item']
            self.exporter[domain].start_exporting()

        assert domain in self.exporter

        self.exporter[domain].export_item(item)

        return item

    def spider_closed(self, spider):
        for domain, exporter in self.exporter.iteritems():
            exporter.finish_exporting()
            self.files[domain].close()
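
For the pipeline to run at all it also has to be enabled in the project settings. A minimal sketch, assuming the project module is called vApp and the pipeline lives in vApp/pipelines.py (both paths are assumptions):

# vApp/settings.py -- enable the custom pipeline (module path is an assumption)
ITEM_PIPELINES = {
    'vApp.pipelines.VappPipeline': 300,
}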

Thanks @neverlastn. I tried your solution but it gave me errors. I think you are right, though: I should post-process the data after the crawl. Especially since I found that the rows are not exported in order. The rows of a scraped table are not written one after another; instead, rows from other URLs that the crawler fetches in the meantime get interleaved with them. So basically my output looks like row1url1, row1url2, row2url1, row2url1... – gcc
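
For reference, a minimal post-processing sketch along the lines suggested in the answer: assume the crawl exported a single results/all.csv with url and item columns (the file name and column names are hypothetical), and split it into one file per URL after the crawl finishes:

# split_by_url.py -- hypothetical post-processing step, not part of the original project
import csv
from collections import defaultdict

rows_by_url = defaultdict(list)

# group rows by the url column that the spider attached to each item
with open('results/all.csv') as f:
    for row in csv.DictReader(f):
        rows_by_url[row['url']].append(row['item'])

# write one CSV per URL, using a filesystem-safe name derived from the URL
for url, items in rows_by_url.items():
    safe_name = url.replace('://', '_').replace('/', '_')
    with open('results/%s.csv' % safe_name, 'w') as out:
        writer = csv.writer(out)
        for value in items:
            writer.writerow([value])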