Scraping a Target Novel with a Python Crawler and Saving It Locally

Novel URL: http://book.zongheng.com/showchapter/749819.html (the table-of-contents page)

The crawler reads the novel's table of contents to collect the URL of every chapter, then visits and parses each chapter page to extract its text, and finally saves everything to a local file.

The code in this post is only a first version, and you can optimize it yourself, for example:

Use an IP proxy pool to keep your IP address from being banned (a minimal sketch follows below).

Fetch the chapters with multiple threads to speed up scraping and shorten the running time (see the thread-pool sketch below).

Build a more detailed set of requests headers (see the header example below).
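
As an illustration, routing each request through a randomly chosen proxy might look like the minimal sketch below; the addresses in PROXY_POOL are placeholders, not working proxies:

import random
import requests

# Placeholder proxy addresses -- replace them with real, working proxies
PROXY_POOL = [
    'http://127.0.0.1:8001',
    'http://127.0.0.1:8002',
]

def get_with_proxy(url, headers=None):
    # Pick a proxy at random so the requests are spread across the pool
    proxy = random.choice(PROXY_POOL)
    return requests.get(url, headers=headers,
                        proxies={'http': proxy, 'https': proxy}, timeout=10)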
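
Multithreaded fetching could be sketched with concurrent.futures; fetch_chapter here is a hypothetical function that downloads and parses a single chapter URL (the getText function below processes the whole list in one call instead):

from concurrent.futures import ThreadPoolExecutor

def fetch_all(urls, fetch_chapter, max_workers=8):
    # pool.map preserves the input order, so the chapters stay in sequence
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        return list(pool.map(fetch_chapter, urls))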
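
A fuller header set might look like the following; the Accept, Accept-Language, and Referer values are illustrative, not captured from the real site:

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Referer': 'http://book.zongheng.com/',
}
# resp = requests.get(url, headers=headers)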

The code still has plenty of shortcomings; feedback and suggestions are welcome.

import requests
from bs4 import BeautifulSoup


def getMuLu(Html):
    """
    getMuLu takes the novel's table-of-contents URL from the main function
    and, after parsing the page, returns the URL of every chapter.
    Involves:
        requests: making the HTTP request
        BeautifulSoup: parsing the returned HTML
    Date: 2020-05-15
    Environment: Windows + Python 3.8
    IDE: PyCharm
    """
    # Build the request headers
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
    }
    # Request the page with requests.get, passing the headers, and take the text
    demo = requests.get(Html, headers=headers)
    demo1 = demo.text

    # Parse the page text with BeautifulSoup
    soup = BeautifulSoup(demo1, 'html.parser')

    # Collect every tag whose class is 'volume-list' into MuLu, convert it to
    # a string, and parse it again with BeautifulSoup
    MuLu = soup.find_all(class_='volume-list')
    soup1 = BeautifulSoup(str(MuLu), 'html.parser')

    # From soup1, take all the <li> tags, then all the <a> tags inside them
    href1 = soup1.find_all('li')
    soup2 = BeautifulSoup(str(href1), 'html.parser')
    href2 = soup2.find_all('a')

    # Store the href attribute of every <a> tag in the list Web3
    Web3 = []
    for link in href2:
        Web1 = link.get('href')
        Web3.append(Web1)
    return Web3


def getText(TextUrl):
    """
    getText takes the list of chapter URLs and, after parsing each page,
    returns every chapter title together with its text.
    Involves:
        requests: making the HTTP request
        BeautifulSoup: parsing the returned HTML
    Date: 2020-05-15
    """
    Mu = []
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    }
    for i in range(len(TextUrl)):
        # Request the chapter page, passing the headers so they are sent
        QingQiu = requests.get(TextUrl[i], headers=headers)
        date = QingQiu.text

        SoupText = BeautifulSoup(date, 'html.parser')
        # Parse SoupText to get the chapter title
        MingCheng = SoupText.find(class_='title_txtbox')
        ls4 = MingCheng.get_text() if MingCheng else ''

        # Parse SoupText to get the chapter body
        BiaoTi = SoupText.find_all(class_='content')     # the div tags whose class is 'content'
        BiaoTi1 = BeautifulSoup(str(BiaoTi), 'html.parser')
        BiaoTi2 = BiaoTi1.find_all('p')     # collect the <p> tags

        # Walk the <p> tags to collect the body text
        KongList = []
        for qbs in range(len(BiaoTi2)):
            ZhangJie = BiaoTi2[qbs]
            str1 = ZhangJie.get_text()
            KongList.append(str1)

        # Join the list into a single string
        ls3 = ''.join(KongList)
        # Combine the chapter title and body into a single string
        NeiRong = ls4 + ls3

        # Collect every chapter in the list Mu, which is returned at the end
        Mu.append(NeiRong)

    return Mu

def BaoCunText(WenBen):
    """
    BaoCunText takes the chapter titles and text from the main function
    and writes them to a txt file.
    Involves:
        file handling: open, write, close
        for loops
    Date: 2020-05-16
    """
    # Open the file in append mode
    FlieText = open('MiMiShiMing.txt', 'a', encoding='utf-8')
    # Walk the list WenBen by index, write each chapter, and start a new
    # line after every chapter
    for i in range(len(WenBen)):
        FlieText.write(str(WenBen[i]))
        FlieText.write('\n')
        print("Chapter {} written".format(i + 1))
    print("Finished writing")
    # Close the file
    FlieText.close()

if __name__ == '__main__':
    """
    Main entry point: calls the other functions and passes values between them.
    Date: 2020-05-17
    """

    url = 'http://book.zongheng.com/showchapter/749819.html'
    MuLuLianjie = getMuLu(url)
    XiaoShuo = getText(MuLuLianjie)
    BaoCunText(XiaoShuo)