Python 微信爬虫完整实例【单线程与多线程】


Posted in Python on July 06, 2019

本文实例讲述了Python 实现的微信爬虫。分享给大家供大家参考,具体如下:

单线程版:

import urllib.request
import urllib.parse
import urllib.error
import re,time
# Global HTTP setup: register a browser-like User-Agent on a shared opener so
# every urllib.request.urlopen() call made by this script carries it.
headers = (
    "User-Agent",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36",
)
operner = urllib.request.build_opener()
operner.addheaders = [headers]
urllib.request.install_opener(operner)

# Article URLs collected by get_url() and later consumed by get_url_content().
list_url = []
### Fetch a page's HTML content (proxy support from an earlier version is disabled)
def use_proxy(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Despite the name, no proxy is used (the ProxyHandler code was removed);
    the request is sent directly with a browser-like User-Agent.

    Returns the page HTML as a str, or None when the request or the UTF-8
    decode fails (errors are printed, never raised to the caller).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36",
    }
    try:
        # Attach the header to this request only, instead of rebuilding and
        # re-installing a process-global opener on every call as before.
        req = urllib.request.Request(url, headers=headers)
        return urllib.request.urlopen(req).read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        elif hasattr(e, "reason"):
            print(e.reason)
    except Exception as e:
        print("exception" + str(e))
        time.sleep(1)  # brief back-off before the caller moves on
## Collect the article URLs to crawl from the Sogou Weixin search results
def get_url(key, pagestart, pageend):
    """Search Sogou Weixin for *key* and collect article URLs.

    Appends every article URL found on result pages pagestart..pageend
    (inclusive) to the module-level ``list_url`` and returns that list.
    Errors are printed, so the returned list may be partial or None may be
    returned after an unexpected failure.
    """
    try:
        keycode = urllib.parse.quote(key)
        for page in range(pagestart, pageend + 1):
            # BUG FIX: the original interpolated the page number into the
            # "type" parameter and hard-coded page=1, so every iteration
            # re-fetched page 1 with a different result type. type=2 selects
            # article results; the page number belongs in "page".
            url = ("http://weixin.sogou.com/weixin?query=%s&_sug_type_=&s_from=input"
                   "&_sug_=n&type=2&page=%d&ie=utf8" % (keycode, page))
            data1 = use_proxy(url)
            if data1 is None:  # fetch failed; skip this page instead of crashing
                continue
            listurl_pattern = '<h3>.*?("http://.*?)</h3>'
            for match in re.compile(listurl_pattern, re.S).findall(data1):
                # Drop HTML-entity fragments and the surrounding quotes,
                # keeping only the href value before the first space.
                res = match.replace("amp;", "").split(" ")[0].replace("\"", "")
                list_url.append(res)
        return list_url
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        elif hasattr(e, "reason"):
            print(e.reason)
    except Exception as e:
        print("exception:", e)
## Crawl the content of each collected URL and write it out as one HTML file
def get_url_content(list_url):
    """Download each article in *list_url* and write title + body to one HTML file.

    Produces D:\\python-script\\1.html containing every article's <h2> title
    and its ``js_content`` body concatenated inside a single HTML document.
    Articles that fail to download or have no recognizable title are skipped.
    """
    out_path = "D:\\python-script\\1.html"
    # "xhtml" fixes the original's misspelled namespace ("xhmtl").
    html_head = ('<!DOCTYPE html>\n<html xmlns="http://www.w3.org/1999/xhtml">\n'
                 '<head>\n<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n'
                 '<title>微信文章</title></head>\n<body>')
    # Compile once outside the loop instead of per article.
    title_re = re.compile('<h2.*>.*?</h2>', re.S)
    content_re = re.compile('id="js_content">(.*?)<div class="rich_media_tool" id="js_sg_bar">', re.S)
    # One "with" block replaces the original open('wb')/close()/open('ab')
    # sequence and guarantees the handle is closed (the original leaked it).
    with open(out_path, 'wb') as fh:
        fh.write(html_head.encode("utf-8"))
        for url in list_url:
            data_content = use_proxy(url)
            if data_content is None:  # download failed; skip this article
                continue
            result_title = title_re.findall(data_content)
            if not result_title:  # no <h2> title found; not an article page
                continue
            res_title = (result_title[0]
                         .replace('<h2 class="rich_media_title" id="activity-name">', '')
                         .replace('</h2>', '')
                         .strip())
            content = content_re.findall(data_content)
            try:
                fh.write(res_title.encode("utf-8"))
                for part in content:
                    fh.write(part.strip().encode("utf-8"))
            except UnicodeEncodeError:
                continue
        fh.write("</body></html>".encode("utf-8"))
if __name__ == '__main__':
    # Crawl result pages 1-2 of a Sogou Weixin search for the keyword, then
    # download every collected article into a single HTML file.
    pagestart = 1
    pageend = 2
    key = "人工智能"
    get_url(key, pagestart, pageend)
    get_url_content(list_url)

多线程版:

import urllib.request
import urllib.parse
import urllib.error
import re,time
import queue
import threading
# Global HTTP setup: install a shared opener that sends a browser-like
# User-Agent with every urllib.request.urlopen() call in this script.
headers = (
    "User-Agent",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36",
)
operner = urllib.request.build_opener()
operner.addheaders = [headers]
urllib.request.install_opener(operner)

# Work queue shared by the producer (get_url) and consumer (get_url_content).
urlque = queue.Queue()
# Kept for parity with the single-threaded version; the queue is used instead.
list_url = []
### Fetch a page's HTML content (proxy support from an earlier version is disabled)
def use_proxy(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Despite the name, no proxy is used (the ProxyHandler code was removed);
    the request is sent directly with a browser-like User-Agent. Building the
    headers per request (instead of re-installing a global opener, as the
    original did) keeps this function safe to call from multiple threads.

    Returns the page HTML as a str, or None when the request or the UTF-8
    decode fails (errors are printed, never raised to the caller).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36",
    }
    try:
        req = urllib.request.Request(url, headers=headers)
        return urllib.request.urlopen(req).read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        elif hasattr(e, "reason"):
            print(e.reason)
    except Exception as e:
        print("exception" + str(e))
        time.sleep(1)  # brief back-off before the caller moves on
### Producer thread: search for article URLs and push them onto the queue
class get_url(threading.Thread):
    """Producer thread: searches Sogou Weixin for *key* over result pages
    pagestart..pageend and puts each article URL found onto *urlque* for the
    get_url_content consumer thread."""

    def __init__(self, key, pagestart, pageend, urlque):
        threading.Thread.__init__(self)
        self.pagestart = pagestart
        self.pageend = pageend
        self.key = key
        self.urlque = urlque

    def run(self):
        try:
            keycode = urllib.parse.quote(self.key)
            for page in range(self.pagestart, self.pageend + 1):
                # BUG FIX: the original interpolated the page number into the
                # "type" parameter and hard-coded page=1, so pagination never
                # happened. type=2 selects article results; the page number
                # belongs in "page".
                url = ("http://weixin.sogou.com/weixin?query=%s&_sug_type_=&s_from=input"
                       "&_sug_=n&type=2&page=%d&ie=utf8" % (keycode, page))
                data = use_proxy(url)
                if data is None:  # fetch failed; try the next page
                    continue
                listurl_pattern = '<h3>.*?("http://.*?)</h3>'
                result = re.compile(listurl_pattern, re.S).findall(data)
                if len(result) == 0:
                    print("没有可用的url")
                    # BUG FIX: the original called sys.exit() without ever
                    # importing sys (NameError); returning ends the producer
                    # thread cleanly.
                    return
                for item in result:
                    res = item.replace("amp;", "").split(" ")[0].replace("\"", "")
                    # BUG FIX: the original called task_done() here, but that
                    # is the consumer's acknowledgement — the producer only
                    # puts items.
                    self.urlque.put(res)
        except urllib.error.URLError as e:
            if hasattr(e, "code"):
                print(e.code)
            elif hasattr(e, "reason"):
                print(e.reason)
        except Exception as e:
            print("exception:", e)
## Consumer thread: pull article URLs from the queue and write contents to HTML
class get_url_content(threading.Thread):
    """Consumer thread: takes article URLs off *urlque*, downloads each one,
    and appends its <h2> title and js_content body to a single HTML file at
    D:\\python-script\\1.html."""

    def __init__(self, urlque):
        threading.Thread.__init__(self)
        self.urlque = urlque

    def run(self):
        out_path = "D:\\python-script\\1.html"
        # "xhtml" fixes the original's misspelled namespace ("xhmtl").
        html_head = ('<!DOCTYPE html>\n<html xmlns="http://www.w3.org/1999/xhtml">\n'
                     '<head>\n<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n'
                     '<title>微信文章</title></head>\n<body>')
        title_re = re.compile('<h2.*>.*?</h2>', re.S)
        content_re = re.compile('id="js_content">(.*?)<div class="rich_media_tool" id="js_sg_bar">', re.S)
        # One "with" block replaces the wb/close/ab reopen dance and fixes the
        # original bug of calling fh.close() inside the loop: the file was
        # closed after the first article and every later write failed.
        with open(out_path, 'wb') as fh:
            fh.write(html_head.encode("utf-8"))
            while True:
                try:
                    # Bounded wait so the thread can terminate once the
                    # producer stops feeding the queue (the original blocked
                    # forever on get()).
                    url = self.urlque.get(timeout=10)
                except queue.Empty:
                    break
                try:
                    data_content = use_proxy(url)
                    if data_content is None:  # download failed; skip it
                        continue
                    result_title = title_re.findall(data_content)
                    if not result_title:  # no recognizable title; skip
                        continue
                    res_title = (result_title[0]
                                 .replace('<h2 class="rich_media_title" id="activity-name">', '')
                                 .replace('</h2>', '')
                                 .strip())
                    content = content_re.findall(data_content)
                    fh.write(res_title.encode("utf-8"))
                    for part in content:
                        fh.write(part.strip().encode("utf-8"))
                except UnicodeEncodeError:
                    continue
                finally:
                    # Acknowledge the item so urlque.join() could unblock.
                    self.urlque.task_done()
            fh.write("</body></html>".encode("utf-8"))
class contrl(threading.Thread):
    """Monitor thread: periodically reports progress and finishes once the
    shared URL queue has drained."""

    def __init__(self, urlqueue):
        threading.Thread.__init__(self)
        self.urlqueue = urlqueue

    def run(self):
        # BUG FIX: the original placed this loop inside __init__, so merely
        # constructing the object blocked the calling thread forever and
        # start() never had any effect. The loop belongs in run().
        while True:
            print("程序正在执行")
            if self.urlqueue.empty():
                time.sleep(3)
                print("程序执行完毕")
                return  # end the monitor thread (original used exit())
            time.sleep(1)  # avoid busy-spinning while work remains
if __name__ == '__main__':
    # Crawl result pages 1-2 for the keyword with one producer thread, one
    # consumer thread, and one monitor thread sharing the urlque queue.
    pagestart = 1
    pageend = 2
    key = "人工智能"
    # BUG FIX: the original rebound the class names to their instances
    # ("get_url = get_url(...)"), shadowing the classes; use distinct names.
    producer = get_url(key, pagestart, pageend, urlque)
    producer.start()
    consumer = get_url_content(urlque)
    consumer.start()
    monitor = contrl(urlque)
    monitor.start()

更多关于Python相关内容可查看本站专题:《Python Socket编程技巧总结》、《Python正则表达式用法总结》、《Python数据结构与算法教程》、《Python函数使用技巧总结》、《Python字符串操作技巧汇总》、《Python入门与进阶经典教程》及《Python文件与目录操作技巧汇总》

希望本文所述对大家Python程序设计有所帮助。

Python 相关文章推荐
python结合opencv实现人脸检测与跟踪
Jun 08 Python
pygame加载中文名mp3文件出现error
Mar 31 Python
Python实现爬取百度贴吧帖子所有楼层图片的爬虫示例
Apr 26 Python
Python内存读写操作示例
Jul 18 Python
Python 复平面绘图实例
Nov 21 Python
Python lxml模块的基本使用方法分析
Dec 21 Python
pytorch ImageFolder的覆写实例
Feb 20 Python
Flask和pyecharts实现动态数据可视化
Feb 26 Python
Python 实现平台类游戏添加跳跃功能
Mar 27 Python
Python趣味挑战之教你用pygame画进度条
May 31 Python
使用python+pygame开发消消乐游戏附完整源码
Jun 10 Python
使用Python通过企业微信应用给企业成员发消息
Apr 18 Python
python实现爬取百度图片的方法示例
Jul 06 #Python
python实现控制电脑鼠标和键盘,登录QQ的方法示例
Jul 06 #Python
python3 打印输出字典中特定的某个key的方法示例
Jul 06 #Python
python使用 zip 同时迭代多个序列示例
Jul 06 #Python
Python搭建Spark分布式集群环境
Jul 05 #Python
详解python解压压缩包的五种方法
Jul 05 #Python
用python3 返回鼠标位置的实现方法(带界面)
Jul 05 #Python
You might like
PHP+MySQL 手工注入语句大全 推荐
2009/10/30 PHP
PHP实现微信公众平台音乐点播
2014/03/20 PHP
php获取域名的google收录示例
2014/03/24 PHP
apache中为php 设置虚拟目录
2014/12/17 PHP
php对文件夹进行相关操作(遍历、计算大小)
2015/11/04 PHP
php生成图片缩略图功能示例
2017/02/22 PHP
PHP+AjaxForm异步带进度条上传文件实例代码
2017/08/14 PHP
PHP函数按引用传递参数及函数可选参数用法示例
2018/06/04 PHP
PHP安装memcache扩展的步骤讲解
2019/02/14 PHP
论坛特效代码收集(落伍转发-不错)
2006/12/02 Javascript
用正则xmlHttp实现的偷(转)
2007/01/22 Javascript
jquery ui dialog里调用datepicker的问题
2009/08/06 Javascript
MooTools 1.2介绍
2009/09/14 Javascript
JQUERY 获取IFrame中对象及获取其父窗口中对象示例
2013/08/19 Javascript
网页右侧悬浮滚动在线qq客服代码示例
2014/04/28 Javascript
实例讲解JavaScript中的this指向错误解决方法
2016/06/13 Javascript
利用JQuery阻止事件冒泡
2016/12/01 Javascript
解决ajax不能访问本地文件问题(利用js跨域原理)
2017/01/24 Javascript
vue、react等单页面项目应该这样子部署到服务器
2018/01/03 Javascript
微信小程序实现卡片左右滑动效果的示例代码
2019/05/01 Javascript
axios异步提交表单数据的几种方法
2019/08/11 Javascript
解决vue bus.$emit触发第一次$on监听不到问题
2020/07/28 Javascript
JavaScript浅层克隆与深度克隆示例详解
2020/09/01 Javascript
vue中echarts的用法及与elementui-select的协同绑定操作
2020/11/17 Vue.js
实例讲解Python中global语句下全局变量的值的修改
2016/06/16 Python
ubuntu16.04制作vim和python3的开发环境
2018/09/23 Python
对Python中class和instance以及self的用法详解
2019/06/26 Python
opencv3/C++图像像素操作详解
2019/12/10 Python
详解HTML5表单新增属性
2016/12/21 HTML / CSS
澳大利高级泳装品牌:Bondi Born
2018/05/23 全球购物
工厂保安员岗位职责
2014/01/31 职场文书
上课迟到检讨书300字
2014/10/15 职场文书
计划生育个人总结
2015/03/02 职场文书
收银员岗位职责范本
2015/04/07 职场文书
2015年超市工作总结
2015/04/09 职场文书
人生遥控器观后感
2015/06/11 职场文书