
Python Web Scraper: Fetching and Parsing Data (a worked example)

By OldKind超

This article walks through a complete example of fetching and parsing data with a Python web scraper. It is shared here for reference; the details follow.

Target: the Dangdang book search, http://search.dangdang.com/?key=python&act=input&page_index=1 (the page_index query parameter selects the results page; a quick check of this endpoint follows the list below).

  1. Fetch book details (title, link, description, price, author)
  2. Structure the scraper as a class, in an object-oriented style
  3. Demonstrate several parsing strategies (regex, bs4, XPath) and storage options (txt, csv, MySQL)
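
Before building the full class, it is worth a quick check that the search endpoint responds and that page_index really selects the page. A minimal sketch, assuming the URL above is still live and that a desktop User-Agent is accepted:

import requests

url = 'http://search.dangdang.com/?key=python&act=input&page_index=1'
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'}
response = requests.get(url,headers=headers)
print(response.status_code)  # 200 means the page was served
print(len(response.text))    # a large body suggests real search results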

Import the required libraries

All of the third-party packages used here (requests, beautifulsoup4, lxml and PyMySQL) can be installed with pip.

import requests
import re
import csv
import pymysql
from bs4 import BeautifulSoup
from lxml import etree

The spider class

class DDSpider(object):
  # Instance attributes: search keyword and number of pages to scrape
  def __init__(self,key='python',page=1):
    self.url = 'http://search.dangdang.com/?key='+key+'&act=input&page_index={}'
    self.page = page
    self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'}

    
  # Private method: build the list of page URLs to request
  def __my_url(self):
    my_url = []
    if self.page < 1:
      my_page = 2   # fewer than one page requested: default to a single page
    else:
      my_page = self.page+1
    # one URL per results page
    for i in range(1,my_page):
      my_url.append(self.url.format(i))
    return my_url
  
  # Private method: request one page and hand the body to the parser
  def __my_request(self,url,parser_type):
    response = requests.get(url=url,headers=self.headers)
    if response.status_code == 200:
      return self.__my_parser(response.text,parser_type)
    else:
      return None
    
  # Private method: parse the page  1 regex  2 bs4  3 xpath
  def __my_parser(self,html,my_type=1):
    if my_type == 1:
      pattern = re.compile('<p.*?class=[\'\"]name[\'\"].*?name=[\'\"]title[\'\"].*?<a.*?title=[\'\"](.*?)[\'\"].*?href=[\'\"](.*?)[\'\"].*?name=[\'\"]itemlist-title[\'\"].*?<p class=[\'\"]detail[\'\"].*?>(.*?)</p>.*?<span.*?class=[\'\"]search_now_price[\'\"].*?>(.*?)</span>.*?<p.*?class=[\'\"]search_book_author[\'\"].*?><span>.*?<a.*?name=[\'\"]itemlist-author[\'\"].*?title=[\'\"](.*?)[\'\"].*?</span>',re.S)
      result = re.findall(pattern,html)
    elif my_type == 2:
      soup = BeautifulSoup(html,'lxml')
      result = []
      # query each field once instead of re-searching the soup per record
      titles = soup.find_all('a',attrs={'name':'itemlist-title'})
      prices = soup.find_all('span',attrs={'class':'search_now_price'})
      authors = soup.find_all('a',attrs={'name':'itemlist-author'})
      details = soup.find_all('p',attrs={'class':'detail'})
      for i in range(len(titles)):
        title = titles[i].attrs['title']
        url = titles[i].attrs['href']
        price = prices[i].get_text()
        author = authors[i].attrs['title']
        desc = details[i].get_text()
        result.append((title,url,desc,price,author))
    else:
      html = etree.HTML(html)
      li_all = html.xpath('//div[@id="search_nature_rg"]/ul/li')
      result = []
      for i in range(len(li_all)):
        title = html.xpath('//div[@id="search_nature_rg"]/ul/li[{}]/p[@class="name"]/a/@title'.format(i+1))
        url = html.xpath('//div[@id="search_nature_rg"]/ul/li[{}]/p[@class="name"]/a/@href'.format(i+1))
        price = html.xpath('//div[@id="search_nature_rg"]/ul/li[{}]//span[@class="search_now_price"]/text()'.format(i+1))
        author_num = html.xpath('//div[@id="search_nature_rg"]/ul/li[{}]/p[@class="search_book_author"]/span[1]/a'.format(i+1))
        if len(author_num) != 0:
          # the author is wrapped in an <a> tag
          author = html.xpath('//div[@id="search_nature_rg"]/ul/li[{}]/p[@class="search_book_author"]/span[1]/a[1]/@title'.format(i+1))
        else:
          # no <a> tag: the author is plain text inside the span
          author = html.xpath('//div[@id="search_nature_rg"]/ul/li[{}]/p[@class="search_book_author"]/span[1]/text()'.format(i+1))
        desc = html.xpath('//div[@id="search_nature_rg"]/ul/li[{}]/p[@class="detail"]/text()'.format(i+1))
        result.append((" ".join(title)," ".join(url)," ".join(desc)," ".join(price)," ".join(author)))

    return result
  
  # Private method: store the data  1 txt  2 csv  3 mysql
  def __my_save(self,data,save_type=1):
    # iterate over the parsed records
    for value in data:
      if save_type == 1:
        with open('ddw.txt','a+',encoding="utf-8") as f:
          f.write('[Title]:{} [Author]:{} [Price]:{} [Detail]:{} [Link]:{}\n'.format(value[0],value[4],value[3],value[2],value[1]))
      elif save_type == 2:
        with open('ddw.csv','a+',newline='',encoding='utf-8-sig') as f:
          writer = csv.writer(f)
          # write the record as one csv row
          writer.writerow(list(value))
      else:
        # passwd and db are left blank here, as in the original; fill in
        # your own credentials. The INSERT targets the example table
        # sketched at the end of this article.
        conn = pymysql.connect(host='127.0.0.1',user='root',passwd='',db='',port=3306,charset='utf8')
        cursor = conn.cursor()
        sql = 'INSERT INTO books(title,url,detail,price,author) VALUES(%s,%s,%s,%s,%s)'
        cursor.execute(sql,value)
        conn.commit()
        cursor.close()
        conn.close()
  # Public method: run the complete scrape
  def my_run(self,parser_type=1,save_type=1):
    my_url = self.__my_url()
    for value in my_url:
      result = self.__my_request(value,parser_type)
      # skip pages that failed to download (result is None)
      if result:
        self.__my_save(result,save_type)

Run the spider to fetch the data

if __name__ == '__main__':
  # create the spider instance (keyword, number of pages)
  dd = DDSpider('python',0)
  # my_run(parser_type,save_type)
  # parser_type: 1 regex  2 bs4  3 xpath
  # save_type:   1 txt    2 csv  3 mysql
  dd.my_run(2,1)
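
For the MySQL path (save_type=3), the INSERT added to __my_save above assumes a table along the lines of the following sketch. The table name books and its columns are illustrative assumptions, not part of the original code; adjust them (and the blank passwd/db values) to match your own database:

import pymysql

# One-off setup for a hypothetical books table (all names are assumptions)
conn = pymysql.connect(host='127.0.0.1',user='root',passwd='',db='',port=3306,charset='utf8')
cursor = conn.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS books(
    id INT AUTO_INCREMENT PRIMARY KEY,
    title VARCHAR(255),
    url VARCHAR(255),
    detail TEXT,
    price VARCHAR(32),
    author VARCHAR(255))''')
conn.commit()
cursor.close()
conn.close()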

To sum up:

1. Overall, the regex approach feels the most streamlined and yields the shortest code, but the pattern itself is comparatively complex and hard to get right.
2. bs4 and XPath require some familiarity with the HTML structure, and pulling several fields per record is comparatively tedious; the short comparison below makes this concrete.
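
To illustrate the trade-off, here is the same title extracted three ways from a small HTML fragment. The fragment and the book title are made up for illustration, modeled on the Dangdang markup used above:

import re
from bs4 import BeautifulSoup
from lxml import etree

# Hypothetical fragment in the style of a Dangdang result item
fragment = '<p class="name"><a name="itemlist-title" title="Python Crash Course" href="http://example.com/book">Python Crash Course</a></p>'

# 1 regex: little code at the call site, but the pattern must mirror the markup exactly
print(re.findall(r'name="itemlist-title" title="(.*?)"',fragment))

# 2 bs4: readable attribute lookups once the tag structure is known
soup = BeautifulSoup(fragment,'lxml')
print(soup.find('a',attrs={'name':'itemlist-title'})['title'])

# 3 xpath: compact path expressions, same structural knowledge required
tree = etree.HTML(fragment)
print(tree.xpath('//a[@name="itemlist-title"]/@title'))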

We hope this article is helpful to readers working on Python programming.
