Scraping Tmall Product Details and Transaction Records in Python


Posted in Python on February 23, 2018

This article shares working code for scraping Tmall product details and transaction records in Python. It is posted here for reference; the details are below.

1. Setting up the Python environment

This post uses Python 2.7.
Modules used: spynner, scrapy, bs4, pymssql.
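
Before going further, it is worth checking that these modules are actually importable in your interpreter. A minimal sanity-check sketch (nothing here is crawler-specific; the names simply mirror the imports in section 4):

# Verify the third-party modules used by the crawler are installed.
for name in ['spynner', 'scrapy', 'bs4', 'pymssql']:
  try:
    __import__(name)              # import by name, discard the module
    print name, 'is available'
  except ImportError as err:
    print name, 'is missing:', err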

2. The Tmall data to capture

For each product, the crawler collects the fields shown on the item page (promotional price, list price, title, postage, stock, favourites count, rating count and monthly sales) plus the product attribute list, and then every page of the transaction record: buyer, style, quantity, deal date and deal time.

3. The scraping workflow

Product URLs are read from SQL Server (the ProductURLs table). For each URL, the item page is loaded in a spynner browser, the detail fields are extracted with XPath, the description tab is clicked to collect the attribute list, and the transaction-record tab is then paged through until the pager runs out, with every record written back to SQL Server.
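
The outline below is a reading aid rather than runnable logic on its own; each numbered step corresponds to a part of the full script in section 4.

# Condensed outline of the crawl loop (implemented in full in section 4):
#  1. SELECT brand, type and URL from the ProductURLs table
#  2. browser.load('https:' + url)  -- render the item page with spynner
#  3. extract price/title/stock/... fields via XPath
#  4. click the "#description" tab and join the attribute <li> texts
#  5. click the "#J_DealRecord" tab, parse the buyer table, then keep
#     clicking the pager's last ("next") link until it disappears
#  6. INSERT the product row and every deal-record row into SQL Server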

4. Source code

#coding:utf-8
import spynner
from scrapy.selector import Selector
from bs4 import BeautifulSoup
import random
import pymssql


#----------------------- Connect to the database -----------------------#
server="localhost"
user="sa"
password = "123456"
conn=pymssql.connect(server,user,password,"TmallData") # pymssql raises on failure
if conn:
  print "Database connected successfully!"
else:
  print "Database connection error!"
cursor=conn.cursor()
#---------------------- Page-interaction helper functions ----------------------#
def py_click_element(browser,pos):
  # Click the element matching a CSS selector, then wait a random
  # 3-10 seconds so the page can settle and the traffic looks human.
  # pos example: 'a[href="#description"]'
  browser.click(pos)
  browser.wait(random.randint(3,10))
  return browser

def py_click_xpath(browser,xpath):
  # Look up the link's href via XPath, then click the matching anchor.
  xpath=xpath+'/@href'
  inner_href=Selector(text=browser.html).xpath(xpath).extract()
  pos='a[href="'+str(inner_href[0])+'"]'
  browser=py_click_element(browser, pos)
  return browser

def py_webpage_load(browser,url):
  # Load a URL with a 60 s timeout, then give its scripts 10 s to run.
  browser.load(url,load_timeout=60)
  browser.wait(10)
  return browser

def py_check_element(browser,xpath):
  # Return True if an element matching the XPath exists on the page.
  return Selector(text=browser.html).xpath(xpath).extract()!=[]

def py_extract_xpath(browser,xpath):
  # Return the first match for the XPath, or "none" if nothing matches.
  if py_check_element(browser, xpath):
    return Selector(text=browser.html).xpath(xpath).extract()[0]
  else:
    return "none"

def py_extract_xpaths(browser,xpaths):
  # Extract a batch of fields from the page in one pass.
  return [py_extract_xpath(browser, xpath) for xpath in xpaths]

#--------------------------- Database helper functions ---------------------------#


#--------------------------- Data-extraction functions ----------------------------#
def py_getDealRecord(doc):
  # Parse one page of the buyer list (an HTML <table>) into rows of
  # [buyer, style, quantity, deal date, deal time].
  soup=BeautifulSoup(doc,'lxml')
  tr=soup.find_all('tr')
  total_dealRecord=[[0]*5 for i in range(len(tr))]
  for i,this_tr in enumerate(tr):
    td_user=this_tr.find_all('td',attrs={'class':"cell-align-l buyer"})
    for this_td in td_user:
      total_dealRecord[i][0]=this_td.getText().strip(' ')
    td_style=this_tr.find_all('td',attrs={'class':"cell-align-l style"})
    for this_td in td_style:
      total_dealRecord[i][1]=this_td.getText(',').strip(' ')
    td_quantity=this_tr.find_all('td',attrs={'class':"quantity"})
    for this_td in td_quantity:
      total_dealRecord[i][2]=this_td.getText().strip(' ')
    td_dealtime=this_tr.find_all('td',attrs={'class':"dealtime"})
    for this_td in td_dealtime:
      total_dealRecord[i][3]=this_td.find('p',attrs={'class':"date"}).getText()
      total_dealRecord[i][4]=this_td.find('p',attrs={'class':"time"}).getText()
  return total_dealRecord
#-------------------- Fetch all the product links to crawl --------------------#
cursor.execute("""
select * from ProductURLs where BrandName='NB'
""")


err_log=open("H:\\Eclipse\\TmallCrawling\\HTMLParse\\errLog.txt","a") # log of URLs that failed to load
InProductInfo=cursor.fetchall()
browser=spynner.Browser()
for temp_InProductInfo in InProductInfo:

  url='https:'+temp_InProductInfo[2]

  BrandName=temp_InProductInfo[0]
  ProductType=temp_InProductInfo[1]
  print BrandName,'\t',ProductType,'\t',url
  #url= 'https://detail.tmall.com/item.htm?id=524425656711&rn=77636d6db8dea5e30060976fdaf9768d&abbucket=19' 

  try:
    browser=py_webpage_load(browser, url)
  except:
    print "Loading webpage failed."
    err_log.write(url)
    err_log.write('\n')
    continue

  # XPaths, in order: [0] promo price, [1] list price, [2] title,
  # [3] postage, [4] stock, [5] favourites count, [6] rating count,
  # [7] monthly sales
  xpaths=['//*[@id="J_PromoPrice"]/dd/div/span/text()',\
    '//*[@id="J_StrPriceModBox"]/dd/span/text()',\
    '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()',\
    '//*[@id="J_PostageToggleCont"]/p/span/text()',\
    '//*[@id="J_EmStock"]/text()',\
    '//*[@id="J_CollectCount"]/text()',\
    '//*[@id="J_ItemRates"]/div/span[2]/text()',\
    '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()']
  out_ProductInfo=py_extract_xpaths(browser,xpaths)
  browser=py_click_element(browser,'a[href="#description"]') # open the description tab
  ProductProperty=py_extract_xpath(browser, '//*[@id="J_AttrUL"]')
  # Join the attribute <li> texts into one backslash-separated string.
  soup=BeautifulSoup(ProductProperty,'lxml')
  li=soup.find_all('li')
  prop=''
  for this_li in li:
    prop=prop+this_li.getText()+'\\'
  prop=prop[:-1] # drop the trailing backslash
  out_ProductProperty=prop
  print out_ProductProperty
  # Column order mirrors py_ProductInfo; note that the script stores
  # out_ProductInfo[1] (the list price) twice.
  cursor.execute("""
  Insert into py_ProductInfo values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
  """,(BrandName,ProductType,url,\
     out_ProductInfo[2],out_ProductInfo[1],\
     out_ProductInfo[0],out_ProductInfo[7],\
     out_ProductInfo[1],out_ProductInfo[3],\
     out_ProductInfo[4],out_ProductInfo[5],\
     out_ProductProperty))
  conn.commit()
  Deal_PageCount=0
  browser=py_click_element(browser, 'a[href="#J_DealRecord"]') # open the deal-record tab
  #browser.browse(True)
  DealRecord=py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
  out_DealRecord=py_getDealRecord(DealRecord)
  for temp_DealRecord in out_DealRecord:
    if str(temp_DealRecord[4])=='0':
      continue # skip rows that yielded no deal time (non-data rows)
    cursor.execute("""
    Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
    """,(url,temp_DealRecord[0],temp_DealRecord[1],\
       temp_DealRecord[2],temp_DealRecord[3],\
       temp_DealRecord[4]))
    conn.commit()
  Deal_PageCount=Deal_PageCount+1
  print "Page ",Deal_PageCount
  # Visit the pager links a[1], a[3], a[4], a[5] under the buyer list
  # (XPath indices are 1-based, so i==0 matches nothing; a[2] is skipped).
  for i in range(6):
    if (i==0) or (i==2):
      continue
    xpath='//*[@id="J_showBuyerList"]/div/div/a['+str(i)+']'
    if py_check_element(browser,xpath):
      browser=py_click_xpath(browser, xpath)
      DealRecord=py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
      out_DealRecord=py_getDealRecord(DealRecord)
      for temp_DealRecord in out_DealRecord:
        if str(temp_DealRecord[4])=='0':
          continue
        cursor.execute("""
        Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
        """,(url,temp_DealRecord[0],temp_DealRecord[1],\
           temp_DealRecord[2],temp_DealRecord[3],\
           temp_DealRecord[4]))
        conn.commit()
      Deal_PageCount=Deal_PageCount+1
      print "Page ",Deal_PageCount
  # Keep clicking the pager's last link (a[6], the "next page" arrow)
  # until it disappears, harvesting each page of deal records.
  while py_check_element(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]'):
    browser=py_click_xpath(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]')
    DealRecord=py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
    out_DealRecord=py_getDealRecord(DealRecord)
    for temp_DealRecord in out_DealRecord:
      if str(temp_DealRecord[4])=='0':
        continue
      cursor.execute("""
      Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
      """,(url,temp_DealRecord[0],temp_DealRecord[1],\
         temp_DealRecord[2],temp_DealRecord[3],\
         temp_DealRecord[4]))
      conn.commit()
    Deal_PageCount=Deal_PageCount+1
    print "Page ",Deal_PageCount

That is everything in this article. I hope it helps with your study, and I hope you will keep supporting 三水点靠木.
