Sending a POST request
- Override def start_requests(self)
- Package the request parameters as a dict
- Yield scrapy.FormRequest(url=url, formdata=data, callback=self.parse), specifying the callback function
```python
import scrapy

class PostdemoSpider(scrapy.Spider):
    name = 'postDemo'
    # allowed_domains = ['www.baidu.com']
    start_urls = ['https://fanyi.baidu.com/sug']

    '''
    Originally there is a start_requests method, which sends GET requests by default:

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    Common ways to send a POST request (start_requests must be overridden):
    - Request(), with its method attribute set to 'POST'
    - FormRequest(), which sends a POST request directly
    '''

    def start_requests(self):
        print('start_request')
        data = {'kw': 'dog'}
        for url in self.start_urls:
            yield scrapy.FormRequest(url=url, formdata=data, callback=self.parse)

    def parse(self, response):
        # print(response.text)
        pass
```
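The docstring above also names the plain Request() route, which the spider does not demonstrate. A minimal sketch of that variant, assuming a hypothetical spider name and the same endpoint; note that Request() has no formdata parameter, so the body is encoded by hand:

```python
from urllib.parse import urlencode

import scrapy

class PostViaRequestSpider(scrapy.Spider):
    name = 'postViaRequest'  # hypothetical name, not from the original notes
    start_urls = ['https://fanyi.baidu.com/sug']

    def start_requests(self):
        data = {'kw': 'dog'}
        for url in self.start_urls:
            # Plain Request() has no formdata argument, so the body must be
            # urlencoded by hand and the Content-Type header set to match.
            yield scrapy.Request(
                url=url,
                method='POST',
                body=urlencode(data),
                headers={'Content-Type': 'application/x-www-form-urlencoded'},
                callback=self.parse,
            )

    def parse(self, response):
        pass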
Cookies
- After the login POST, cookies are handled automatically; subsequent requests to related pages just work. Mind the callback chain.
- Be sure to review the settings of a newly created project (see the sketch after this list)
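Concretely, "the settings of a newly created project" usually comes down to a few lines in settings.py. A sketch of the typical adjustments (the values are illustrative assumptions, not taken from the original notes):

```python
# settings.py: typical adjustments for a fresh Scrapy project

# Identify as a normal browser (assumed UA string) so the login
# endpoint does not reject the request outright.
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'

# Login APIs are typically disallowed by robots.txt, so robots
# compliance has to be switched off for this spider to run at all.
ROBOTSTXT_OBEY = False

# Cookie handling is on by default and must stay enabled so the session
# cookie from the login POST carries over to later requests.
COOKIES_ENABLED = True
```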
```python
import scrapy

class DoubanSpider(scrapy.Spider):
    name = 'douban'
    # allowed_domains = ['www.douban.com']
    start_urls = ['https://accounts.douban.com/j/mobile/login/basic']

    def start_requests(self):
        print('start')
        for url in self.start_urls:
            data = {
                'ck': '',
                'name': '',
                'password': '',
                'remember': 'false',
                'ticket': ''
            }
            yield scrapy.FormRequest(url=url, formdata=data, callback=self.parse)

    def parse(self, response):
        print('logged in')
        # The session cookie set by the login response is attached to this
        # follow-up request automatically by Scrapy's cookie middleware.
        url = 'https://www.douban.com/people/193627830/'
        yield scrapy.Request(url=url, callback=self.parseBySecond)

    def parseBySecond(self, response):
        print('writing file')
        with open('./test.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
```
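To confirm that the session cookie really does carry over, Scrapy's standard COOKIES_DEBUG setting logs every Cookie and Set-Cookie header as requests go out and responses come back:

```python
# settings.py
COOKIES_DEBUG = True  # log outgoing Cookie and incoming Set-Cookie headers
```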
Proxies
- Define a custom class in the project's middleware module and implement process_request
- Inside it, set request.meta['proxy'] = 'https://proxy-ip'
- Register the class in settings; every outgoing request will then be rewritten to go through the proxy
```python
import scrapy

class DoubanSpider(scrapy.Spider):
    name = 'douban'
    # allowed_domains = ['www.douban.com']
    start_urls = ['https://www.baidu.com/s?wd=ip']

    def parse(self, response):
        # Save the result page to check which IP Baidu saw.
        with open('baidu.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
```
```python
# settings.py
DOWNLOADER_MIDDLEWARES = {
    'three.middlewares.MyProxies': 543,
}
```
```python
# middlewares.py
class MyProxies(object):
    def process_request(self, request, spider):
        request.meta['proxy'] = 'https://157.230.150.101:8080'
```
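Because process_request fires on every request, the fixed address above can be swapped for a pool so each request goes out through a different proxy. A minimal rotating sketch (the class name and addresses are illustrative assumptions):

```python
import random

class RandomProxies(object):
    # Hypothetical pool; fill in working proxy URLs.
    PROXIES = [
        'https://157.230.150.101:8080',
        'https://203.0.113.7:3128',  # placeholder documentation address
    ]

    def process_request(self, request, spider):
        # Choose a different proxy for each outgoing request.
        request.meta['proxy'] = random.choice(self.PROXIES)
```

Registering it works the same way as above: point DOWNLOADER_MIDDLEWARES at three.middlewares.RandomProxies instead of MyProxies.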