Using Python to scrape crawler-related posts from 博客园 (cnblogs)

# coding: utf-8
import requests
from bs4 import BeautifulSoup

# Lists to collect the scraped titles and URLs
Reptilian_url = []
Reptilian_title = []

# Open a session so the connection is reused across requests
s = requests.session()

# Spoofed request headers
h = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4094.1 Safari/537.36',
    # note: requests can only decode 'br' responses if the brotli package is installed
    'accept-encoding': 'gzip, deflate, br'
}
# Walk the listing pages from page 1 to page 200; the maximum page
# count is hard-coded here, but it could also be read from the page itself
for d in range(1, 201):
    url = 'https://www.cnblogs.com/cate/python/%d' % d
    r = s.get(url, headers=h)
    soup = BeautifulSoup(r.text, 'lxml')
    title_tags = soup.select('div.post_item_body h3')
    link_tags = soup.find_all(attrs={'class': 'titlelnk'})
    # Append each title to the list (get_text avoids None on nested tags)
    for z1 in title_tags:
        Reptilian_title.append(z1.get_text(strip=True))
    # Append each post URL to the list
    for u1 in link_tags:
        Reptilian_url.append(u1['href'])
# Print the total number of scraped posts
print(len(Reptilian_title))

# Filter: print only the posts whose title contains "爬虫" (crawler)
for k in range(len(Reptilian_title)):
    if '爬虫' in Reptilian_title[k]:
        print(k + 1, '%s' % Reptilian_title[k], '', '%s' % Reptilian_url[k])
        print('===============================')

You can also add a short snippet in the middle that writes the results to a file, so you can save them and sneak a look later.
# Append each title/URL pair to a text file, one pair per line
Reptilian = open('1.txt', 'a', encoding='utf-8')
for title, link in zip(Reptilian_title, Reptilian_url):
    Reptilian.writelines(['标题:', str(title), ' ', 'url:', str(link), '\n'])
Reptilian.close()
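
As the comment in the loop notes, the maximum page count does not have to be hard-coded: it can be read from the first listing page. Below is a minimal sketch of one way to do that; the "pager" class name is an assumption about the cnblogs pagination markup, so inspect the actual page in the browser before relying on it.

# A minimal sketch for reading the maximum page number from the first
# listing page instead of hard-coding 200. The "pager" class name is an
# assumption about the cnblogs markup; verify it in the browser inspector.
import requests
from bs4 import BeautifulSoup

def get_max_page(session, headers):
    r = session.get('https://www.cnblogs.com/cate/python/1', headers=headers)
    soup = BeautifulSoup(r.text, 'lxml')
    pager = soup.find('div', class_='pager')
    if pager is None:
        return 1  # no pagination found; assume a single page
    # Keep only link texts that are pure digits and take the largest
    pages = [int(a.get_text()) for a in pager.find_all('a') if a.get_text().isdigit()]
    return max(pages) if pages else 1

With this helper, the hard-coded range becomes for d in range(1, get_max_page(s, h) + 1): and the scraper automatically follows however many pages the category currently has.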