Manual Sending of Requests
pipelines.py
```python
class SecondPipeline(object):
    f = None

    def open_spider(self, spider):
        # runs once when the spider starts: open the output file
        print('start')
        self.f = open('./qiubai.text', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # runs for every item the spider yields
        # '\n' added so consecutive items do not run together on one line
        self.f.write(item['author'] + ':' + item['content'] + '\n')
        return item

    def close_spider(self, spider):
        # runs once when the spider finishes: close the file
        self.f.close()
        print('end')
```
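For the pipeline to receive items at all, it must be enabled in settings.py. A minimal sketch, assuming the Scrapy project module is named second (consistent with the from second.items import SecondItem in qiubai.py below):

```python
# settings.py -- sketch only; the module path assumes the project
# is named "second", matching the imports used in this post
ITEM_PIPELINES = {
    'second.pipelines.SecondPipeline': 300,  # lower number = runs earlier
}
```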
qiubai.py
```python
import scrapy
from second.items import SecondItem


class QiubaiSpider(scrapy.Spider):
    name = 'qiubai'
    # allowed_domains = ['www.qiushibaike.com']
    start_urls = ['https://www.qiushibaike.com/text/']

    # page-url template and page counter, defined as class attributes
    url = 'https://www.qiushibaike.com/text/page/%d/'
    pageNum = 1

    def parse(self, response):
        print('crawling...')
        div_list = response.xpath("//div[@id='content-left']/div")
        for div in div_list:
            author = div.xpath('./div/a[2]/h2/text()').extract_first()
            content = div.xpath(".//div[@class='content']/span/text()").extract_first()

            items = SecondItem()
            items['author'] = author
            items['content'] = content

            yield items

        # At this point the first url has been fetched successfully.
        # Add the next url manually with yield scrapy.Request.
        # The callback parses the response; it can be a custom function,
        # or parse itself (recursion -- note the termination condition).
        # Pass the method itself, without parentheses:
        # url = 'https://www.qiushibaike.com/text/page/2/'
        # yield scrapy.Request(url=url, callback=self.parse)
        if self.pageNum <= 13:
            self.pageNum += 1
            # format() is redundant here: the % operator already returns a str
            new_url = format(self.url % self.pageNum)
            yield scrapy.Request(url=new_url, callback=self.parse)
```
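The spider imports SecondItem, but the post does not show items.py. A plausible sketch, inferred from the two fields the spider fills in:

```python
# items.py -- assumed definition; only author/content are used above
import scrapy


class SecondItem(scrapy.Item):
    author = scrapy.Field()
    content = scrapy.Field()
```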
Summary:
- Multiple urls can be added manually, with the callback function handling each response (see the custom-callback sketch after this list)
- When every page shares the same layout, the parse function can be called recursively; mind the termination condition
- yield scrapy.Request
- Note how class-level data attributes (url, pageNum) combine with the spider instance: Scrapy creates one spider object, so self.pageNum += 1 persists across callbacks
- The use of the format() function when building new_url; note it is redundant here, since self.url % self.pageNum is already a string
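The comments in parse mention that the callback does not have to be parse itself. A minimal sketch of the custom-callback variant; the spider name, pagination XPath, and log message below are illustrative assumptions, not from the post:

```python
import scrapy


class DemoSpider(scrapy.Spider):
    name = 'demo_callback'
    start_urls = ['https://www.qiushibaike.com/text/']

    def parse(self, response):
        # schedule the next page with a *custom* callback instead of parse
        next_href = response.xpath("//ul[@class='pagination']//a/@href").extract_first()  # assumed selector
        if next_href:
            yield scrapy.Request(url=response.urljoin(next_href),
                                 callback=self.parse_next)

    def parse_next(self, response):
        # a custom callback receives the same Response object as parse
        self.logger.info('fetched %s', response.url)
```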