Scraping Tmall Product Details and Deal Records with Python


Posted in Python on February 23, 2018

This article shares the complete code for scraping Tmall product details and deal records with Python, for your reference. The details are as follows.

1. Setting Up the Python Environment

This post uses Python 2.7.
Modules involved: spynner, scrapy, bs4, pymssql
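
To make sure the environment is ready, a quick import check helps; this is just a sanity-check sketch, separate from the crawler itself:

#coding:utf-8
#sanity check: confirm the four required modules import cleanly under Python 2.7
try:
  import spynner
  import pymssql
  from scrapy.selector import Selector
  from bs4 import BeautifulSoup
  print "All required modules imported successfully."
except ImportError as e:
  print "Missing module:",e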

2. The Tmall Data to Capture

For each product, the crawler collects the page-level fields (promotional price, list price, title, postage, stock, favorites count, rating count, and monthly sales), the full product-property list, and every page of the deal-record table (buyer, style, quantity, deal date, and deal time).

3. Data-Capture Workflow

The script reads the product URLs to crawl from a ProductURLs table in SQL Server, loads each product page with spynner, extracts the fields above by XPath, clicks through to the description tab for the property list and to the deal-record tab for the transaction table, pages through all deal-record pages, and writes everything back to the database.
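
As a taste of the core load-and-extract step, here is a minimal sketch, assuming the spynner/scrapy environment above; the URL is the sample product page that appears commented out in the source below, and the XPath is the title XPath from section 4:

#coding:utf-8
#minimal sketch: load one product page with spynner and pull the title by XPath
import spynner
from scrapy.selector import Selector

browser=spynner.Browser()
browser.load('https://detail.tmall.com/item.htm?id=524425656711&rn=77636d6db8dea5e30060976fdaf9768d&abbucket=19',load_timeout=60)
browser.wait(10)  #give the dynamically rendered content time to settle
title=Selector(text=browser.html).xpath(
  '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()').extract()
print title[0].strip() if title else 'title not found'
browser.close()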

4. Source Code

#coding:utf-8
import spynner
from scrapy.selector import Selector
from bs4 import BeautifulSoup
import random
import pymssql


#------------------------connect to the database-----------------------------#
server="localhost"
user="sa"
password = "123456"
conn=pymssql.connect(server,user,password,"TmallData")
#pymssql.connect raises an exception on failure, so reaching this line means success
if conn:
  print "Database connected successfully!"
else:
  print "Database connection error!"
cursor=conn.cursor()
#----------------------browser helper functions--------------------------#
def py_click_element(browser,pos):
  #click an element on the page, identified by a CSS selector
  #pos example: 'a[href="#description"]'
  browser.click(pos)
  browser.wait(random.randint(3,10))  #random wait, to look less like a bot
  return browser

def py_click_xpath(browser,xpath):
  #look up the link's href by XPath, then click it via a CSS selector
  xpath=xpath+'/@href'
  inner_href=Selector(text=browser.html).xpath(xpath).extract()
  pos='a[href="'+str(inner_href[0])+'"]'
  browser=py_click_element(browser, pos)
  return browser

def py_webpage_load(browser,url):
  #load a page, allowing time for the dynamic content to render
  browser.load(url,load_timeout=60)
  browser.wait(10)
  return browser

def py_check_element(browser,xpath):
  #look up an element by XPath; return True if it exists, False otherwise
  if Selector(text=browser.html).xpath(xpath).extract()!=[]:
    return True
  else:
    return False

def py_extract_xpath(browser,xpath):
  #return the first match for the XPath, or "none" if nothing matches
  if py_check_element(browser, xpath):
    return Selector(text=browser.html).xpath(xpath).extract()[0]
  else:
    return "none"

def py_extract_xpaths(browser,xpaths):
  #extract a batch of XPaths from the current page
  length=len(xpaths)
  results=[0]*length
  for i in range(length):
    results[i]=py_extract_xpath(browser, xpaths[i])
  return results

#-----------------------------database helper functions---------------------------#


#-----------------------------data extraction functions----------------------------#
def py_getDealRecord(doc):
  #parse the deal-record table HTML into [buyer, style, quantity, date, time] rows
  soup=BeautifulSoup(doc,'lxml')
  tr=soup.find_all('tr')
  total_dealRecord=[[0]*5 for i in range(len(tr))]  #one row per <tr>
  i=-1
  for this_tr in tr:
    i=i+1
    td_user=this_tr.find_all('td',attrs={'class':"cell-align-l buyer"})
    for this_td in td_user:
      total_dealRecord[i][0]=this_td.getText().strip(' ')  #buyer name
    td_style=this_tr.find_all('td',attrs={'class':"cell-align-l style"})
    for this_td in td_style:
      total_dealRecord[i][1]=this_td.getText(',').strip(' ')  #style/SKU, comma-joined
    td_quantity=this_tr.find_all('td',attrs={'class':"quantity"})
    for this_td in td_quantity:
      total_dealRecord[i][2]=this_td.getText().strip(' ')  #quantity
    td_dealtime=this_tr.find_all('td',attrs={'class':"dealtime"})
    for this_td in td_dealtime:
      total_dealRecord[i][3]=this_td.find('p',attrs={'class':"date"}).getText()  #deal date
      total_dealRecord[i][4]=this_td.find('p',attrs={'class':"time"}).getText()  #deal time
  return total_dealRecord
#--------------------fetch all product links to crawl-----------------------#
cursor.execute("""
select * from ProductURLs where BrandName='NB'
""")


err_log=open("H:\\Eclipse\\TmallCrawling\\HTMLParse\\errLog.txt",'a')  #append mode, so failed URLs can be logged below
InProductInfo=cursor.fetchall()
browser=spynner.Browser()
for temp_InProductInfo in InProductInfo:

  url='https:'+temp_InProductInfo[2]  #stored URLs are protocol-relative

  BrandName=temp_InProductInfo[0]
  ProductType=temp_InProductInfo[1]
  print BrandName,'\t',ProductType,'\t',url
  #url= 'https://detail.tmall.com/item.htm?id=524425656711&rn=77636d6db8dea5e30060976fdaf9768d&abbucket=19' 

  try:
    browser=py_webpage_load(browser, url)
  except Exception:
    print "Loading webpage failed."
    err_log.write(url)
    err_log.write('\n')
    continue

  xpaths=['//*[@id="J_PromoPrice"]/dd/div/span/text()',                       #promo price
    '//*[@id="J_StrPriceModBox"]/dd/span/text()',                             #list price
    '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()',             #title
    '//*[@id="J_PostageToggleCont"]/p/span/text()',                           #postage
    '//*[@id="J_EmStock"]/text()',                                            #stock
    '//*[@id="J_CollectCount"]/text()',                                       #favorites count
    '//*[@id="J_ItemRates"]/div/span[2]/text()',                              #rating count
    '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()']  #monthly sales
  out_ProductInfo=py_extract_xpaths(browser,xpaths)
  browser=py_click_element(browser,'a[href="#description"]')  #open the description tab
  ProductProperty=py_extract_xpath(browser, '//*[@id="J_AttrUL"]')
  soup=BeautifulSoup(ProductProperty,'lxml')
  li=soup.find_all('li')
  prop=''
  for this_li in li:
    prop=prop+this_li.getText()+'\\'  #join property items with backslash separators
  prop=prop[0:len(prop)-1]  #drop the trailing separator
  out_ProductProperty=prop
  print out_ProductProperty
  cursor.execute("""
  Insert into py_ProductInfo values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
  """,(BrandName,ProductType,url,
     out_ProductInfo[2],out_ProductInfo[1],
     out_ProductInfo[0],out_ProductInfo[7],
     #out_ProductInfo[1] appears twice; out_ProductInfo[6] (rating count) is never stored
     out_ProductInfo[1],out_ProductInfo[3],
     out_ProductInfo[4],out_ProductInfo[5],
     out_ProductProperty))
  conn.commit()
  Deal_PageCount=0
  browser=py_click_element(browser, 'a[href="#J_DealRecord"]')  #open the deal-record tab
  #browser.browse(True)
  DealRecord=py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
  out_DealRecord=py_getDealRecord(DealRecord)
  for temp_DealRecord in out_DealRecord:
    if str(temp_DealRecord[4])=='0':
      continue  #skip rows that were never filled in (no deal time)
    cursor.execute("""
    Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
    """,(url,temp_DealRecord[0],temp_DealRecord[1],\
       temp_DealRecord[2],temp_DealRecord[3],\
       temp_DealRecord[4]))
    conn.commit()
  Deal_PageCount=Deal_PageCount+1
  print "Page ",Deal_PageCount
  for i in range(6):
    if (i==0) or (i==2):
      continue  #a[0] never matches (XPath indexes from 1) and a[2] is deliberately skipped
    xpath='//*[@id="J_showBuyerList"]/div/div/a['+str(i)+']'
    if py_check_element(browser,xpath):
      browser=py_click_xpath(browser, xpath)
      DealRecord=py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
      out_DealRecord=py_getDealRecord(DealRecord)
      for temp_DealRecord in out_DealRecord:
        if str(temp_DealRecord[4])=='0':
          continue
        cursor.execute("""
        Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
        """,(url,temp_DealRecord[0],temp_DealRecord[1],\
           temp_DealRecord[2],temp_DealRecord[3],\
           temp_DealRecord[4]))
        conn.commit()
      Deal_PageCount=Deal_PageCount+1
      print "Page ",Deal_PageCount
  #keep paging while a sixth pager anchor ("next page") is still present
  while py_check_element(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]'):
    browser=py_click_xpath(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]')
    DealRecord=py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
    out_DealRecord=py_getDealRecord(DealRecord)
    for temp_DealRecord in out_DealRecord:
      if str(temp_DealRecord[4])=='0':
        continue
      cursor.execute("""
      Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
      """,(url,temp_DealRecord[0],temp_DealRecord[1],\
         temp_DealRecord[2],temp_DealRecord[3],\
         temp_DealRecord[4]))
      conn.commit()
    Deal_PageCount=Deal_PageCount+1
    print "Page ",Deal_PageCount

That's all for this article. I hope it helps with your studies, and please continue to support 三水点靠木.
