Python web-crawler tutorial (Python 爬虫入门)
"""Beginner web crawler: download every chapter of one novel from
jingcaiyuedu.com and append them all into a single ``<book title>.txt``.

NOTE(review): this script was recovered from a blog scrape.  The blog
engine stripped the HTML out of two regex literals (they survived only as
``r''`` placeholders), so the chapter-list and content patterns below are
reconstructions — verify them against the live page markup before use.
"""
import re

import requests

# 1. Download the novel's index page.
novel_url = 'http://www.jingcaiyuedu.com/book/15205/list.html'
response = requests.get(novel_url)
# Set the encoding explicitly so ``response.text`` decodes correctly.
response.encoding = 'utf-8'
html = response.text  # decoded page source as a str

# 2. Extract the book title: non-greedy match of the text between the
#    《 》 brackets inside the "keywords" meta tag.
title = re.findall(r'<meta name="keywords" content="《(.*?)》', html)[0]

# The index page contains two <dl> blocks; per the original comment
# ("dl 有两个", index [1]) the second one holds the chapter list.
# NOTE(review): pattern reconstructed — confirm against the real markup.
dl = re.findall(r'<dl.*?>(.*?)</dl>', html, re.S)[1]

# Each chapter entry is an <a href="...">chapter title</a> pair:
# group 1 = (possibly relative) URL, group 2 = chapter title.
# NOTE(review): pattern reconstructed — confirm against the real markup.
chapter_info_list = re.findall(r'<a\s+href="(.*?)".*?>(.*?)</a>', dl)

# Persist everything into one UTF-8 text file named after the book.
# ``with`` guarantees the file is closed (the original never closed it).
with open('%s.txt' % title, 'w', encoding='utf-8') as fb:
    # 3. Visit every chapter page and extract its text.
    for chapter_info in chapter_info_list:
        chapter_url = chapter_info[0]
        chapter_title = chapter_info[1]
        # Turn a relative chapter URL into an absolute one.
        if 'http' not in chapter_url:
            chapter_url = 'http://www.jingcaiyuedu.com%s' % chapter_url
        # Download the chapter page.
        chapter_response = requests.get(chapter_url)
        chapter_response.encoding = 'utf-8'
        chapter_html = chapter_response.text
        # Extract the chapter body (non-greedy, across newlines).
        # NOTE(review): pattern reconstructed — confirm the container id.
        chapter_content = re.findall(
            r'<div.*?id="content".*?>(.*?)</div>', chapter_html, re.S)[0]
        # Clean the data: the original chained four .replace() calls whose
        # arguments were garbled by the scrape; these are the usual
        # artifacts on this kind of page — TODO confirm against real pages.
        chapter_content = chapter_content.replace('&nbsp;', '')
        chapter_content = chapter_content.replace('<br/>', '')
        chapter_content = chapter_content.replace('<br />', '')
        chapter_content = chapter_content.replace('\r\n', '')
        # Write the chapter title and body, each followed by a newline
        # (the scrape had dropped the backslashes: ``'n'`` -> ``'\n'``).
        fb.write(chapter_title)
        fb.write('\n')
        fb.write(chapter_content)
        fb.write('\n')
        # Progress indicator.
        print(chapter_url)
神龙|纯净稳定代理IP免费测试>>>>>>>>天启|企业级代理IP免费测试>>>>>>>>IPIPGO|全球住宅代理IP免费测试