python爬取微信公众号文章


Posted in Python on August 31, 2018

本文实例为大家分享了python爬取微信公众号文章的具体代码,供大家参考,具体内容如下

# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from requests.exceptions import RequestException
import time
import random
import MySQLdb
import threading
import socket
import math
 
socket.setdefaulttimeout(60)# Set a timeout on the whole socket layer; later socket users need not set it again
 
glock = threading.Lock() # global lock guarding the shared URL lists below
 
CATEGORY_URL= ['http://www.we123.com/gzh/onclick/'] # region/category listing links (seeded with the root listing)
all_url = [] # (unused placeholder)
ALL_URLS = [] # all detail-page links waiting to be scraped
proxy_list = [] # proxy IP pool filled by get_ip()
URL = 'http://www.we123.com'
PAGE_URL = [] # all pagination links for the current category
 
# Build the proxy IP pool
def get_ip():
  """Fetch a JSON list of proxies from the vendor API and append
  'http://ip:port' entries to the global proxy_list.

  On any request/parse failure the pool is simply left unchanged,
  so the crawler can still run without proxies.
  """
  headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
  url = 'http://http-webapi.zhimaruanjian.com'#可以使用芝麻代理,好用稳定还不贵
  try:
    resp = requests.get(url, headers=headers, timeout=30)
    resp.raise_for_status()
    obj = resp.json() # expected: a list of {'ip': ..., 'port': ...} dicts
  except (RequestException, ValueError) as e:
    # ValueError covers a non-JSON body (e.g. an HTML error page).
    print('get_ip failed: ' + str(e))
    return
  for ip in obj:
    proxy_list.append('http://' + str(ip['ip']) + ':' + str(ip['port']))
 
# Fetch a page and return the Response object
def get_html(url):
  """GET *url* and return the requests Response, or the sentinel
  string "error" on any request failure.

  Callers MUST compare the result against "error" before touching
  `.text`. The response is returned regardless of HTTP status code;
  callers inspect `status_code` themselves.
  """
  headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3538.400 QQBrowser/9.6.12501.400'
  }
  try:
    # Explicit timeout: requests does not honor socket.setdefaulttimeout
    # reliably, and a hung connection would stall a whole worker thread.
    resp = requests.get(url, headers=headers, timeout=60)
    # The original if/elif chain returned resp for 200/404/500 and for
    # everything else alike, so a single return is equivalent.
    return resp
  except requests.exceptions.Timeout:
    # Was `except RuntimeError`, which requests never raises on timeout,
    # so the dedicated timeout message was dead code.
    print("超时")
    return "error"
  except requests.exceptions.ConnectionError:
    # Was the builtin ConnectionError, which requests' ConnectionError
    # does not inherit from — another dead branch, now fixed.
    print("连接超时")
    return "error"
  except RequestException:
    print("http请求父类错误")
    # Keep a record of URLs that failed for non-timeout reasons.
    with open('url_exception.txt','a+', encoding='utf-8') as f:
      f.write(str(url))
      f.write('\n')
    return "error"
 
# Collect the region/category listing links
def get_categoty_url():
  """Scrape the root listing page and append one absolute category URL
  per region to the global CATEGORY_URL, skipping overseas regions.
  """
  url = 'http://www.we123.com/gzh/onclick/'
  resp = get_html(url)
  if resp == "error":
    # get_html returns a sentinel string on failure; the original code
    # would have crashed here with AttributeError on resp.text.
    print('get_categoty_url: connect url error')
    return
  soup = BeautifulSoup(resp.text,'lxml')
  links = soup.select('div.div-subs2 > div.divst-content > div.divst-subs > li > a')
  for a in links:
    # Last path segment is the region name; skip non-mainland regions.
    city = a['href'].split("/")[-1]
    if city in ('海外', '台湾', '澳门'):
      continue
    CATEGORY_URL.append(URL + a['href'])
  print(CATEGORY_URL)
 
 
# Build all pagination links for one region
def get_page_url(url):
  """Return the list of pagination URLs for the region listing at *url*,
  or the sentinel string "error" when the page cannot be fetched or has
  no result counter.

  The site shows 30 entries per page; the total count is read from the
  `div.page > a > b` element.
  """
  html = get_html(url)
  if html == "error":
    print("98行:connect url error")
    time.sleep(random.randint(10,20))
    return "error"
  soup = BeautifulSoup(html.text,'lxml')
  # Total number of entries (the original ran this selector twice).
  counter = soup.select("div.page > a > b")
  if not counter:
    return "error"
  all_nums = counter[0].get_text()
  # Number of pages at 30 entries per page.
  all_pages = math.ceil(int(all_nums) / 30)
  return [
    'http://www.we123.com/e/action/ListInfo.php?page=' + str(i) +
    '&classid=45&line=30&tempid=10&orderby=onclick&myorder=0&totalnum=' + str(all_nums)
    for i in range(all_pages)
  ]
 
# Load pagination links for the next category into PAGE_URL
def get_page_urls():
  """Pop one category URL and set the global PAGE_URL to its pagination
  links. On failure PAGE_URL is left as an empty list — the original
  assigned the string "error", which made downstream len()/pop() calls
  iterate the sentinel's characters.
  """
  global PAGE_URL
  c_url = CATEGORY_URL.pop()
  print('121 行:请求链接' + c_url)
  pages = get_page_url(c_url) # pagination links for this region
  PAGE_URL = [] if pages == "error" else pages
 
# Worker: collect detail-page links from the pagination pages
def get_info_urls():
  """Thread worker: repeatedly pop a pagination URL from PAGE_URL, fetch
  it, and append every detail-page link found to ALL_URLS.

  Fixes a deadlock in the original: it acquired glock before the HTTP
  request and `return`ed on fetch failure WITHOUT releasing it, hanging
  every other worker. The lock now only guards shared-list access, and
  network/parse work happens unlocked.
  """
  global PAGE_URL
  while True:
    glock.acquire()
    if len(PAGE_URL) == 0:
      glock.release()
      print('131 行:CATEGORY_URL 为空')
      break
    p_url = PAGE_URL.pop()
    print('135 行:请求链接' + p_url)
    glock.release()

    # Network I/O and parsing outside the lock.
    html = get_html(p_url)
    if html == "error":
      print("141行:connect url error")
      time.sleep(2)
      return
    soup = BeautifulSoup(html.text,'lxml')
    info_urls = soup.select('div.gzhRight > div.gzh_list > ul > li > a')

    glock.acquire()
    try:
      for x in info_urls:
        ALL_URLS.append(URL + x['href'])
      print("库存链接共:" + str(len(ALL_URLS)))
    finally:
      glock.release()
# Worker: scrape one account page and persist the record
def get_data():
  """Thread worker: repeatedly pop a detail URL from ALL_URLS, scrape the
  public-account fields (name, id, avatar, QR code, description,
  category) and insert them via add_data().

  Robustness fixes vs. the original: the unguarded `select(...)[0]` on
  the account id (which raised IndexError on unexpected page layouts and
  killed the worker) is now a skip-and-continue, and the lock around the
  DB write is released via try/finally.
  """
  global ALL_URLS
  while True:
    glock.acquire()
    print("当前库存:"+str(len(ALL_URLS)))
    if len(ALL_URLS) == 0:
      glock.release()
      print('159 行 :ALL_URLS 为空')
      break
    url = ALL_URLS.pop()
    print("开始抓取数据:" + url)
    glock.release()
    time.sleep(1) # throttle so we don't hammer the site
    html = get_html(url)
    if html == "error":
      print("168行:connect url error")
      time.sleep(random.randint(2, 4))
      return
    html.encoding='utf-8' # force decoding; the site occasionally mislabels its charset
    soup = BeautifulSoup(html.text,'lxml')
    # Public-account name
    names = soup.select('div.artcleLeft > div.xcxnry > div.xcxtop > div.xcxtop_left > div.gzhtop_logo > h1')
    # WeChat account id — guard the [0] index: a 404 or redesigned page
    # has none, and the original IndexError terminated the thread.
    account_nodes = soup.select('div.artcleLeft > div.xcxnry > div.xcxtop > div.xcxtop_left > div.gzhtop_logo > p')
    if not account_nodes:
      print('页面结构异常,跳过:' + url)
      continue
    accounts = [account_nodes[0]]
    # Avatar image
    imgs = soup.select('div.artcleLeft > div.xcxnry > div.xcxtop > div.xcxtop_left > div.gzhtop_logo > img')
    # Account QR code
    QR_codes= soup.select('div.artcleLeft > div.xcxnry > div.xcxtop > div.xcxtop_right > img')
    # Description text
    descs = soup.select('div.artcleLeft > div.xcxnry > div.xcxinfo')
    # Account category; default to '综合' (general) when absent
    cate = soup.select('div.artcleLeft > div.xcxnry > div.xcxtop > div.xcxtop_left > div.xcx_p > span > a')
    category = cate[0].get_text() if cate else '综合'
    glock.acquire()
    try:
      for name,account,img,QR_code,desc in zip(names,accounts,imgs,QR_codes,descs):
        data = {
          'name':name.get_text(),
          'category':category,
          'account':account.get_text().split(":")[-1],
          'img':img['src'],
          'QR_code':QR_code['src'],
          'desc':desc.get_text()
        }
        add_data(data,url)
    finally:
      glock.release()
# Persist one record to MySQL
def add_data(data,url):
  """Insert one account record (dict with keys name/category/account/
  img/QR_code/desc) into the weixin5 table.

  On failure the source *url* is pushed back onto ALL_URLS for retry.
  Fixes vs. the original: success is reported only AFTER the commit
  (it used to print before even executing), the bare `except` is
  narrowed to MySQLdb.Error, the failure message now prints the failing
  `url` instead of the unrelated global `URL`, and the connection is
  closed in a finally block.
  """
  con = MySQLdb.connect('127.0.0.1','root','root','test',charset="utf8",use_unicode=True)
  cursor = con.cursor()
  insert_sql = """
    insert ignore into weixin5(w_name,category,account,img,QR_code,introduce)
    VALUES (%s,%s,%s,%s,%s,%s)
    """
  try:
    cursor.execute(insert_sql,(data['name'],data['category'],data['account'],data['img'],data['QR_code'],str(data['desc'])))
    con.commit()
    print('212行 :' + data['name'] + '_' + data['account'] + '添加成功!-' + url)
  except MySQLdb.Error:
    ALL_URLS.insert(0,url) # re-queue for another attempt
    print("218行:" + url + '插入失败')
    con.rollback()
  finally:
    con.close()
 
# Convert a Chinese date string to a Unix timestamp
def time_to(dt):
  """Parse a date like '2018年08月31日' and return its Unix timestamp
  (seconds, interpreted in the local timezone) as an int.
  """
  parsed = time.strptime(dt, "%Y年%m月%d日")
  return int(time.mktime(parsed))
 
# Launch the crawler thread pools
def main():
  """Start 3 link-collector threads, wait briefly so ALL_URLS fills up,
  then start 5 data-scraper threads.
  """
  for _ in range(3):
    threading.Thread(target=get_info_urls).start()
  # Give the collectors a head start before the scrapers begin draining.
  time.sleep(3)
  for _ in range(5):
    threading.Thread(target=get_data).start()
 
if __name__ == '__main__':
  # Start the wall-clock timer
  t1 = time.time()
  # Fill the proxy pool (currently fetched but unused by get_html)
  get_ip() #获取ip池
  # Pop the seed category and build its pagination links into PAGE_URL
  get_page_urls()
  time.sleep(2)
  # get_categoty_url()  # disabled: would expand CATEGORY_URL to every region
  main()
  # Print total elapsed seconds (threads may still be running — not joined)
  print(time.time() - t1)

以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持三水点靠木。

Python 相关文章推荐
Python类定义和类继承详解
May 08 Python
简单理解Python中的装饰器
Jul 31 Python
Python开发之快速搭建自动回复微信公众号功能
Apr 22 Python
Python常见格式化字符串方法小结【百分号与format方法】
Sep 18 Python
Python序列化与反序列化pickle用法实例
Nov 11 Python
Python利用FFT进行简单滤波的实现
Feb 26 Python
Python发送手机动态验证码代码实例
Feb 28 Python
基于Numba提高python运行效率过程解析
Mar 02 Python
Keras:Unet网络实现多类语义分割方式
Jun 11 Python
Python如何使用vars返回对象的属性列表
Oct 17 Python
解决Pytorch dataloader时报错每个tensor维度不一样的问题
May 28 Python
Python实战实现爬取天气数据并完成可视化分析详解
Jun 16 Python
Python单向链表和双向链表原理与用法实例详解
Aug 31 #Python
Python使用Flask-SQLAlchemy连接数据库操作示例
Aug 31 #Python
浅谈Python traceback的优雅处理
Aug 31 #Python
python梯度下降法的简单示例
Aug 31 #Python
wxPython的安装与使用教程
Aug 31 #Python
python traceback捕获并打印异常的方法
Aug 31 #Python
基于python中theano库的线性回归
Aug 31 #Python
You might like
一键删除顽固的空文件夹 软件下载
2007/01/26 PHP
php SQL防注入代码集合
2008/04/25 PHP
Codeigniter的dom类用法实例
2015/06/26 PHP
FileUpload 控件 禁止手动输入或粘贴的实现代码
2010/04/07 Javascript
JavaScript类和继承 prototype属性
2010/09/03 Javascript
jquery 读取页面load get post ajax 四种方式代码写法
2011/04/02 Javascript
屏蔽网页右键复制和ctrl+c复制的js代码
2013/01/04 Javascript
Jquery实现鼠标移上弹出提示框、移出消失思路及代码
2013/05/19 Javascript
JavaScript获取多个数组的交集简单实例
2013/11/11 Javascript
js禁止回车提交表单的示例代码
2013/12/23 Javascript
基于JavaScript实现手机短信按钮倒计时(超简单)
2015/12/30 Javascript
深入理解JavaScript中的并行处理
2016/09/22 Javascript
关于vue.extend和vue.component的区别浅析
2017/08/16 Javascript
javascript中new Array()和var arr=[]用法区别
2017/12/01 Javascript
vue.js中$set与数组更新方法
2018/03/08 Javascript
Vue.js组件高级特性实例详解
2018/12/24 Javascript
vue中子组件传递数据给父组件的讲解
2019/01/27 Javascript
vue点击按钮动态创建与删除组件功能
2019/12/29 Javascript
vue中touch和click共存的解决方式
2020/07/28 Javascript
跟老齐学Python之集成开发环境(IDE)
2014/09/12 Python
Python类的专用方法实例分析
2015/01/09 Python
详解Django中的权限和组以及消息
2015/07/23 Python
Python 爬虫学习笔记之单线程爬虫
2016/09/21 Python
Python下载网络小说实例代码
2018/02/03 Python
python中in在list和dict中查找效率的对比分析
2018/05/04 Python
浅谈python中requests模块导入的问题
2018/05/18 Python
python 2.7.13 安装配置方法图文教程
2018/09/18 Python
python针对不定分隔符切割提取字符串的方法
2018/10/26 Python
Python多继承以及MRO顺序的使用
2019/11/11 Python
Python namedtuple命名元组实现过程解析
2020/01/08 Python
TensorFlow通过文件名/文件夹名获取标签,并加入队列的实现
2020/02/17 Python
HTML5本地存储和本地数据库实例详解
2017/09/05 HTML / CSS
社区敬老月活动实施方案
2014/02/17 职场文书
68句权威创业名言
2019/08/26 职场文书
MySQL 1130异常,无法远程登录解决方案详解
2021/08/23 MySQL
SQL bool盲注和时间盲注详解
2022/07/23 SQL Server