A Python multithreaded crawler

# -*- coding: utf-8 -*-

from urllib import request, parse
from bs4 import BeautifulSoup as BS
import json
import datetime
import xlsxwriter
import threading
import queue
import time


url="http://www.lagou.com/jobs/positionAjax.json"
tag = ['companyName', 'companyShortName', 'positionName', 'education', 'salary', 'financeStage', 'companySize',
        'industryField', 'companyLabelList','workYear']  # 这是须要抓取的标签信息,包括公司名称,学历要求,薪资等等

tag_name = ['公司名称', '公司简称', '职位名称', '所需学历', '工资', '公司资质', '公司规模', '所属类别', '公司介绍','工做年限']
def read_page(url, page_num, keyword):
    """POST one page to the positionAjax endpoint and return the JSON text."""
    page_header = {
        'Host': 'www.lagou.com',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.110 Safari/537.36',
        'Connection': 'keep-alive'
    }
    first_flag = (page_num == 1)  # 'first' is true only for the first page of a search
    page_data = parse.urlencode([
        ('first', first_flag),
        ('pn', page_num),
        ('kd', keyword)
    ])
    req = request.Request(url, headers=page_header)
    # Open the Request object, not the bare url, so the headers are actually sent
    page = request.urlopen(req, data=page_data.encode('utf-8')).read()
    return page.decode('utf-8')

def read_max_page(page):
    """Read the total page count from the JSON; Lagou only serves the first 30 pages."""
    page_json = json.loads(page)
    max_page_num = page_json['content']['totalPageCount']
    if max_page_num > 30:
        max_page_num = 30
    return max_page_num


def read_tag(page, tag):
    """Pull the wanted fields out of one page of results."""
    page_json = json.loads(page)
    results = page_json['content']['result']
    page_result = []
    for item in results:  # the last page may hold fewer than 15 postings
        row = [item.get(page_tag) for page_tag in tag]
        row[8] = ','.join(row[8] or [])  # companyLabelList is a list (or may be missing)
        page_result.append(row)
    print(page_result)
    return page_result

class MyThread(threading.Thread):
    def __init__(self, records, page_num, keyword):
        threading.Thread.__init__(self, name=str(page_num))
        self._records = records
        self._keyword = keyword

    def run(self):
        # Drain the queue; get_nowait() avoids the race between qsize() and get()
        while True:
            try:
                page_num = self._records.get_nowait()
            except queue.Empty:
                break
            page = read_page(url, page_num, self._keyword)
            read_tag(page, tag)
            time.sleep(0.01)  # be gentle with the server

def reptile(records, threadNum, keyword):
    """Start threadNum workers and wait for them all to finish."""
    tasks = []
    for i in range(threadNum):  # honour threadNum instead of a hard-coded 10
        task = MyThread(records, i + 1, keyword)
        task.daemon = False  # plain attribute; setDaemon() is deprecated
        task.start()
        tasks.append(task)
    for task in tasks:
        task.join()  # join() instead of busy-waiting on isAlive()
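
For comparison, the same fan-out can be written with concurrent.futures from the standard library. This is an alternative sketch, not part of the original script, and reptile_pooled is a hypothetical name:

from concurrent.futures import ThreadPoolExecutor

def reptile_pooled(page_nums, thread_num, keyword):
    # Same work as reptile(), but the pool handles thread lifecycle and joining
    with ThreadPoolExecutor(max_workers=thread_num) as pool:
        for page in pool.map(lambda n: read_page(url, n, keyword), page_nums):
            read_tag(page, tag)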

if __name__ == '__main__':
    myQueue = queue.Queue()
    keyword = input('Enter the job keyword to search for: ')
    starttime = datetime.datetime.now()

    fin_result = []  # collect rows here if read_tag's return values are gathered (see the sketch below)
    page = read_page(url, 1, keyword)
    max_page_num = read_max_page(page)
    for row in range(1, max_page_num + 1):  # + 1 so the last page is not skipped
        myQueue.put(row)

    reptile(myQueue, max_page_num, keyword)
    endtime = datetime.datetime.now()
    elapsed = (endtime - starttime).seconds  # renamed so the time module is not shadowed
    print('Total time: %s s' % elapsed)
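
xlsxwriter is imported above but never used, and tag_name suggests the rows were meant to end up in a spreadsheet. A minimal sketch of that step, assuming the per-page lists returned by read_tag have been gathered into fin_result (save_excel and the lagou.xlsx filename are illustrative, not from the original script):

def save_excel(fin_result, tag_name, filename='lagou.xlsx'):
    # One header row from tag_name, then one row per job posting
    book = xlsxwriter.Workbook(filename)
    sheet = book.add_worksheet()
    for col, name in enumerate(tag_name):
        sheet.write(0, col, name)
    for row, record in enumerate(fin_result, start=1):
        for col, value in enumerate(record):
            sheet.write(row, col, value)
    book.close()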