队列是在内存中创建的,如果整个进程里的程序运行完毕之后会被清空,消息就清空了。
class Queue:
    '''Create a queue object with a given maximum size.

    If maxsize is <= 0, the queue size is infinite.

    A single lock (``mutex``) is shared by three condition variables so
    that blocked producers, blocked consumers and ``join()`` callers are
    all serialised against the same queue state.
    '''

    def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._init(maxsize)  # subclass hook: build the underlying container

        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = threading.Lock()

        # Signalled whenever an item is added; wakes a thread blocked in get().
        self.not_empty = threading.Condition(self.mutex)

        # Signalled whenever an item is removed; wakes a thread blocked in put().
        self.not_full = threading.Condition(self.mutex)

        # Signalled when the number of unfinished tasks drops to zero;
        # wakes threads blocked in join().
        self.all_tasks_done = threading.Condition(self.mutex)
        self.unfinished_tasks = 0

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete.

        Used by Queue consumer threads.  For each get() used to fetch a
        task, a subsequent call to task_done() tells the queue that the
        processing on the task is complete.

        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).

        Raises a ValueError if called more times than there were items
        placed in the queue.
        '''
        with self.all_tasks_done:
            remaining = self.unfinished_tasks - 1
            if remaining < 0:
                raise ValueError('task_done() called too many times')
            if remaining == 0:
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = remaining

    def join(self):
        '''Block until all items in the Queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to
        the queue, and down whenever a consumer calls task_done().  When it
        drops to zero, join() unblocks.
        '''
        with self.all_tasks_done:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()

    def qsize(self):
        '''Return the approximate size of the queue (not reliable!).'''
        with self.mutex:
            return self._qsize()

    def empty(self):
        '''Return True if the queue is empty, False otherwise (not reliable!).

        This method is likely to be removed at some point.  Use qsize() == 0
        as a direct substitute, but be aware that either approach risks a
        race condition where a queue can grow before the result of empty()
        or qsize() can be used.

        To create code that needs to wait for all queued tasks to be
        completed, the preferred technique is to use the join() method.
        '''
        with self.mutex:
            return not self._qsize()

    def full(self):
        '''Return True if the queue is full, False otherwise (not reliable!).

        This method is likely to be removed at some point.  Use
        qsize() >= n as a direct substitute, but be aware that either
        approach risks a race condition where a queue can shrink before the
        result of full() or qsize() can be used.
        '''
        with self.mutex:
            # An unbounded queue (maxsize <= 0) is never full.
            return self.maxsize > 0 and self._qsize() >= self.maxsize

    def put(self, item, block=True, timeout=None):
        '''Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the
        default), block if necessary until a free slot is available.  If
        'timeout' is a non-negative number, it blocks at most 'timeout'
        seconds and raises the Full exception if no free slot was available
        within that time.  Otherwise ('block' is false), put an item on the
        queue if a free slot is immediately available, else raise the Full
        exception ('timeout' is ignored in that case).
        '''
        with self.not_full:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() >= self.maxsize:
                        raise Full
                elif timeout is None:
                    # Wait forever for a free slot.
                    while self._qsize() >= self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    deadline = time() + timeout
                    while self._qsize() >= self.maxsize:
                        left = deadline - time()
                        if left <= 0.0:
                            raise Full
                        self.not_full.wait(left)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()

    def get(self, block=True, timeout=None):
        '''Remove and return an item from the queue.

        If optional args 'block' is true and 'timeout' is None (the
        default), block if necessary until an item is available.  If
        'timeout' is a non-negative number, it blocks at most 'timeout'
        seconds and raises the Empty exception if no item was available
        within that time.  Otherwise ('block' is false), return an item if
        one is immediately available, else raise the Empty exception
        ('timeout' is ignored in that case).
        '''
        with self.not_empty:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                # Wait forever for an item.
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                deadline = time() + timeout
                while not self._qsize():
                    left = deadline - time()
                    if left <= 0.0:
                        raise Empty
                    self.not_empty.wait(left)
            head = self._get()
            self.not_full.notify()
            return head

    def put_nowait(self, item):
        '''Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        '''
        return self.put(item, block=False)

    def get_nowait(self):
        '''Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available.  Otherwise raise
        the Empty exception.
        '''
        return self.get(block=False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).  These will only be called with
    # appropriate locks held.

    def _init(self, maxsize):
        # Default FIFO representation: a double-ended queue.
        self.queue = deque()

    def _qsize(self):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        return self.queue.popleft()
import queue

# FIFO demo: the first item put in is the first item handed back.
q = queue.Queue()
q.put(123)
q.put(456)
print(q.get())  # -> 123
class LifoQueue(Queue):
    '''Variant of Queue that retrieves most recently added entries first.'''

    def _init(self, maxsize):
        # Backing store is a plain list used as a stack.
        self.queue = []

    def _qsize(self):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        # pop() removes from the tail, giving last-in-first-out order.
        return self.queue.pop()
import queue

# LIFO demo: behaves like a stack, so the last item put in comes out first.
q = queue.LifoQueue()
q.put(123)
q.put(456)
print(q.get())  # -> 456
class PriorityQueue(Queue):
    '''Variant of Queue that retrieves open entries in priority order
    (lowest first).

    Entries are typically tuples of the form:  (priority number, data).
    '''

    def _init(self, maxsize):
        # Backing store is a list maintained as a binary heap.
        self.queue = []

    def _qsize(self):
        return len(self.queue)

    def _put(self, item):
        heappush(self.queue, item)

    def _get(self):
        # The heap root is always the smallest entry.
        return heappop(self.queue)
import queue

# Priority demo: entries are (priority, data) tuples; the smallest
# priority number is retrieved first.
q = queue.PriorityQueue()
q.put((1, 'alex1'))
q.put((6, 'alex2'))
q.put((3, 'alex3'))
print(q.get())  # the lowest priority number wins
"""
(1, 'alex1')
"""
class deque(object):
    """deque([iterable[, maxlen]]) --> deque object

    A list-like sequence optimized for data accesses near its endpoints.

    NOTE(review): this is an IDE-generated skeleton of ``_collections.deque``
    ("real signature unknown"); every body is a placeholder, not the real
    implementation.
    """

    def append(self, *args, **kwargs):  # real signature unknown
        """Add an element to the right side of the deque."""
        pass

    def appendleft(self, *args, **kwargs):  # real signature unknown
        """Add an element to the left side of the deque."""
        pass

    def clear(self, *args, **kwargs):  # real signature unknown
        """Remove all elements from the deque."""
        pass

    def copy(self, *args, **kwargs):  # real signature unknown
        """Return a shallow copy of a deque."""
        pass

    def count(self, value):  # restored from __doc__
        """D.count(value) -> integer -- return number of occurrences of value"""
        return 0

    def extend(self, *args, **kwargs):  # real signature unknown
        """Extend the right side of the deque with elements from the iterable"""
        pass

    def extendleft(self, *args, **kwargs):  # real signature unknown
        """Extend the left side of the deque with elements from the iterable"""
        pass

    def index(self, value, start=None, stop=None):  # restored from __doc__
        """D.index(value, [start, [stop]]) -> integer -- return first index
        of value.  Raises ValueError if the value is not present.
        """
        return 0

    def insert(self, index, p_object):  # restored from __doc__
        """D.insert(index, object) -- insert object before index"""
        pass

    def pop(self, *args, **kwargs):  # real signature unknown
        """Remove and return the rightmost element."""
        pass

    def popleft(self, *args, **kwargs):  # real signature unknown
        """Remove and return the leftmost element."""
        pass

    def remove(self, value):  # restored from __doc__
        """D.remove(value) -- remove first occurrence of value."""
        pass

    def reverse(self):  # restored from __doc__
        """D.reverse() -- reverse *IN PLACE*"""
        pass

    def rotate(self, *args, **kwargs):  # real signature unknown
        """Rotate the deque n steps to the right (default n=1).
        If n is negative, rotates left.
        """
        pass

    def __add__(self, *args, **kwargs):
        """Return self+value."""
        pass

    def __bool__(self, *args, **kwargs):
        """self != 0"""
        pass

    def __contains__(self, *args, **kwargs):
        """Return key in self."""
        pass

    def __copy__(self, *args, **kwargs):
        """Return a shallow copy of a deque."""
        pass

    def __delitem__(self, *args, **kwargs):
        """Delete self[key]."""
        pass

    def __eq__(self, *args, **kwargs):
        """Return self==value."""
        pass

    def __getattribute__(self, *args, **kwargs):
        """Return getattr(self, name)."""
        pass

    def __getitem__(self, *args, **kwargs):
        """Return self[key]."""
        pass

    def __ge__(self, *args, **kwargs):
        """Return self>=value."""
        pass

    def __gt__(self, *args, **kwargs):
        """Return self>value."""
        pass

    def __iadd__(self, *args, **kwargs):
        """Implement self+=value."""
        pass

    def __imul__(self, *args, **kwargs):
        """Implement self*=value."""
        pass

    def __init__(self, iterable=(), maxlen=None):  # known case of _collections.deque.__init__
        """deque([iterable[, maxlen]]) --> deque object

        A list-like sequence optimized for data accesses near its endpoints.
        """
        pass

    def __iter__(self, *args, **kwargs):
        """Implement iter(self)."""
        pass

    def __len__(self, *args, **kwargs):
        """Return len(self)."""
        pass

    def __le__(self, *args, **kwargs):
        """Return self<=value."""
        pass

    def __lt__(self, *args, **kwargs):
        """Return self<value."""
        pass

    def __mul__(self, *args, **kwargs):
        """Return self*value."""
        pass

    @staticmethod  # known case of __new__
    def __new__(*args, **kwargs):
        """Create and return a new object.  See help(type) for accurate signature."""
        pass

    def __ne__(self, *args, **kwargs):
        """Return self!=value."""
        pass

    def __reduce__(self, *args, **kwargs):
        """Return state information for pickling."""
        pass

    def __repr__(self, *args, **kwargs):
        """Return repr(self)."""
        pass

    def __reversed__(self):  # restored from __doc__
        """D.__reversed__() -- return a reverse iterator over the deque"""
        pass

    def __rmul__(self, *args, **kwargs):
        """Return self*value."""
        pass

    def __setitem__(self, *args, **kwargs):
        """Set self[key] to value."""
        pass

    def __sizeof__(self):  # restored from __doc__
        """D.__sizeof__() -- size of D in memory, in bytes"""
        pass

    # maximum size of a deque or None if unbounded
    maxlen = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    __hash__ = None
import queue

# queue re-exports collections.deque: a double-ended queue with O(1)
# appends and pops at either end.
q = queue.deque()
q.append(123)
q.append(333)
q.append(888)
q.append(999)
q.appendleft(456)
print(q.pop())      # rightmost element
print(q.popleft())  # leftmost element
"""
999
456
"""
处理并发的能力变大了。
队列的好处是什么?如果没有这个队列的话,每一个连接都有一个最大的连接数,在等待的过程中如果没有消息队列的话,服务端便需要维护这条连接,这也是对服务端造成资源的消耗和浪费,而这条服务端跟客户端的连接便需要挂起:第一、没有新的连接可以进来;第二、正在连接的客户端其实只是等待着。
redis
如果有队列的存在,它没有连接数的限制,你就不需要去担忧或维护这个空连接。
一次产生订单,可能中间有很多不同的步骤需要创建,每次产生订单都会耗时耗资源,单次处理请求的能力会下降。
目的是提高处理并发的能力,并可以支持瞬间爆发的客户请求。
当没有消息队列:客户提交了订单的请求,客户端和服务器端一直保持连接,等待服务器端查询后返回数据给客户端。
当有消息队列时:客户提交了订单的请求,把消息推送到消息队列中,此时客户端和服务器端的连接可以断开,不需要一直连接;当服务器端完成了查询后它会更新数据库的一个状态,此时,客户端会自动刷新去获得该请求的状态。
另一个好处是当你把请求放在消息队列中,数据处理能力变得更可扩展、更灵活,只需要多添加几个服务器端,它们就会到队列里拿请求来帮忙。
这是一个先进先出的消息队列。
def __init__(self, maxsize=0):
    """Set up queue storage plus the lock/condition machinery.

    maxsize <= 0 means the queue is unbounded.
    """
    self.maxsize = maxsize
    self._init(maxsize)  # subclass hook: create the underlying container

    # mutex must be held whenever the queue is mutating.  All methods
    # that acquire mutex must release it before returning.  mutex is
    # shared between the three conditions, so acquiring and releasing
    # the conditions also acquires and releases mutex.
    self.mutex = threading.Lock()

    # Signalled after put(): wakes consumers blocked in get().
    self.not_empty = threading.Condition(self.mutex)

    # Signalled after get(): wakes producers blocked in put().
    self.not_full = threading.Condition(self.mutex)

    # Signalled when unfinished_tasks drops to zero: wakes join() callers.
    self.all_tasks_done = threading.Condition(self.mutex)
    self.unfinished_tasks = 0
import queue q = queue.Queue(10)
import queue

# Think of it as a two-door lift: first in, first out.
q = queue.Queue(2)  # only two items may queue up at once

# Add data to the queue.
q.put(11)
q.put(22)
q.put(33, timeout=2)  # no slot frees up within 2 seconds, so queue.Full is raised
"""
Traceback (most recent call last):
 File "/s13/Day11/practice/s2.py", line 32, in <module>
 q.put(33, timeout=2)
 File "queue.py", line 141, in put
 raise Full
queue.Full
"""
put(self, item, block=True, timeout=None)
# put -- add data to the queue.
#   timeout: give up and raise queue.Full after this many seconds
#   block:   whether to wait for a free slot at all
q = queue.Queue(10)

# Add data to the queue.
q.put(11)
q.put(22)
q.put(33, block=False, timeout=2)  # succeeds: the queue still has room
get(self, block=True, timeout=None)
# get -- take data from the queue; blocks by default.
#   timeout: raise queue.Empty after this many seconds
#   block:   whether to wait for an item at all
# First-in-first-out.
q = queue.Queue(10)

# Add data to the queue.
q.put(11)
q.put(22)

# Take data from the queue.
print(q.qsize())         # current length of the queue
print(q.get())
print(q.get(timeout=2))
"""
True
11
"""
q = queue.Queue(10)
print(q.empty())  # True: nothing is queued yet
q.put(11)         # add data to the queue
print(q.empty())  # False: one message is queued now
"""
True
False
"""
import queue

q = queue.Queue(5)
q.put(123)
q.put(456)
print(q.get())
q.task_done()  # each completed get() must be acknowledged with task_done()
print(q.get())
q.task_done()
q.join()  # blocks until every queued task has been marked done; returns immediately here
在集合中找最大或者是最小值的时候可以用 heapq 来解决。
# Finding the n smallest / largest items of a collection with heapq.
portfolio = [
    {'name': 'IBM', 'shares': 100, 'price': 91.1},
    {'name': 'AAPL', 'shares': 50, 'price': 543.22},
    {'name': 'FB', 'shares': 200, 'price': 21.09},
    {'name': 'HPQ', 'shares': 35, 'price': 31.75},
    {'name': 'YHOO', 'shares': 45, 'price': 16.35},
    {'name': 'ACME', 'shares': 75, 'price': 115.65},
]

# Rank by price: the three cheapest and the three most expensive holdings.
cheap = heapq.nsmallest(3, portfolio, key=lambda s: s['price'])
expensive = heapq.nlargest(3, portfolio, key=lambda s: s['price'])
它本质上是通过 socket 来连接,然后进行 socket 通讯。
[更新中]
[C1,1]
[C2,2]
[C3,1]
[C1,C2,C2,C3]
数字/ len([C1,C2,C2,C3])
# Download, unpack and build the stable Redis release from source.
wget http://download.redis.io/redis-stable.tar.gz
tar xvzf redis-stable.tar.gz
cd redis-stable
make
172.16.201.133:6379> ping PONG #链接正常会返回 Pong
redis-cli -h 172.16.201.133 -p 6379 -a mypass
sudo vim /etc/redis/redis.conf #把 bind 127.0.0.1 改为 0.0.0.0 bind 0.0.0.0 #从新启动 Redis 服务器 sudo /etc/init.d/redis-server restart
user@py-ubuntu:~$ redis-server 1899:C 20 Oct 14:58:33.558 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf 1899:M 20 Oct 14:58:33.559 * Increased maximum number of open files to 10032 (it was originally set to 1024). _._ _.-``__ ''-._ _.-`` `. `_. ''-._ Redis 3.0.6 (00000000/0) 64 bit .-`` .-```. ```\/ _.,_ ''-._ ( ' , .-` | `, ) Running in standalone mode |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379 | `-._ `._ / _.-' | PID: 1899 `-._ `-._ `-./ _.-' _.-' |`-._`-._ `-.__.-' _.-'_.-'| | `-._`-._ _.-'_.-' | http://redis.io `-._ `-._`-.__.-'_.-' _.-' |`-._`-._ `-.__.-' _.-'_.-'| | `-._`-._ _.-'_.-' | `-._ `-._`-.__.-'_.-' _.-' `-._ `-.__.-' _.-' `-._ _.-' `-.__.-' 1899:M 20 Oct 14:58:33.564 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. 1899:M 20 Oct 14:58:33.564 # Server started, Redis version 3.0.6 1899:M 20 Oct 14:58:33.564 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect. 1899:M 20 Oct 14:58:33.564 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled. 1899:M 20 Oct 14:58:33.565 * DB loaded from disk: 0.000 seconds 1899:M 20 Oct 14:58:33.565 * The server is now ready to accept connections on port 6379 user@py-ubuntu:~$ redis-cli -h 192.168.80.128 -p 6379 192.168.80.128:6379>
SET mykey "apple" # 设置1条key,value组合 SETNX mykey "redis" # 设置1条key,value组合若是当前库没有这条key存在 MSET k1 "v1" k2 "v2" k3 "v3" # 同时设置多条key,value组合
172.16.201.133:6379> HMSET person name "Janice" sex "F" age 20 #设置person的key,value组合 OK 172.16.201.133:6379> HGETALL person #查看person的全部key,value 1) "name" 2) "Janice" 3) "sex" 4) "F" 5) "age" 6) "20" 172.16.201.133:6379> HEXISTS person name #查看person中有没有name这个key,若是有,返回1 (integer) 1 172.16.201.133:6379> HEXISTS person birthday #查看person中有没有birthday这个key,若是沒有,返回0 (integer) 0 172.16.201.133:6379> HGET person name #查看person中name key的值 "Janice" 172.16.201.133:6379> HKEYS person #查看person有哪些key 1) "name" 2) "sex" 3) "age" 172.16.201.133:6379> HMGET person name sex age #查看person这些key的值 1) "Janice" 2) "F" 3) "20" 172.16.201.133:6379> HVALS person #查看person的全部值 1) "Janice" 2) "F" 3) "20" 172.16.201.133:6379> HLEN person #查看person的长度 (integer) 3
#Subscriber 172.16.201.133:6379> SUBSCRIBE redisChat Reading messages... (press Ctrl-C to quit) 1) "subscribe" 2) "redisChat" 3) (integer) 1 -------------------------------------------------------- 1) "message" 2) "redisChat" 3) "Redis is a great caching technique" 1) "message" 2) "redisChat" 3) "Learn redis by tutorials point" #Publisher 172.16.201.133:6379> PUBLISH redisChat "Redis is a great caching technique" (integer) 1 172.16.201.133:6379> PUBLISH redisChat "Learn redis by tutorials point" (integer) 1
连接 Redis 有两种方法,一种是直接连接,一种是通过连接池的方式去连接,试说明两种方法的优点和缺点。发送数据前的连接是非常耗时的,你发送数据可能需要1秒,但每次连接可能需要5秒;如果维护了一个连接池的话,就不用每次都重新连接数据库。
import redis

# Direct connection: this client opens its own connection to the server.
r = redis.Redis(host='172.16.201.133', port=6379)
r.set('fruits', 'apple')   # store a key
print(r.get('fruits'))     # values come back as bytes
import redis

# Connection-pool connection: the pool keeps connections alive so each
# command does not pay the connection cost again.
# NOTE(review): the original comment called this a "thread pool"; it is a
# connection pool.
pool = redis.ConnectionPool(host='172.16.201.133', port=6379)  # create a connection pool
r = redis.Redis(connection_pool=pool)  # hand the pool to the redis client object
r.set('fruits', 'apple')   # store a key
print(r.get('fruits'))     # values come back as bytes
>>> import redis >>> pool = redis.ConnectionPool(host='172.16.201.133', port=6379) >>> r = redis.Redis(connection_pool=pool) >>> r.lpush('li',11,22,33,44,55,66,77,88,99) 9 >>> r.lpop('li') # get the list from the left b'99' >>> r.lrange('li',2,5) [b'66', b'55', b'44', b'33'] >>> r.rpop('li') # get the list from the right b'11'
>>> import redis >>> r = redis.Redis(host='172.16.201.133', port=6379) >>> r.hmset('person',{'name':'Janice','sex':'F','age':20}) True >>> r.hset('movies','name','Doctors') 0 >>> r.hsetnx('movies','name','Secret Garden') 0 >>> print(r.hget('movies','name')) b'Doctors' >>> print(r.hget('person','name')) b'Janice' >>> print(r.hgetall('person')) {b'sex': b'F', b'name': b'Janice', b'age': b'20'} >>> print(r.hmget('person',['name','sex','age'])) [b'Janice', b'F', b'20'] >>> print(r.hexists('person','name')) True >>> print(r.hexists('person','birthday')) False >>> print(r.hvals('person')) [b'Janice', b'F', b'20'] >>> print(r.hvals('movies')) [b'Doctors'] >>> print(r.hlen('person')) 3 >>> print(r.hdel('person','sex','gender','age')) 2 >>> print(r.hgetall('person')) {b'name': b'Janice'}
>>> import redis >>> r = redis.Redis(host='172.16.201.133', port=6379) >>> r.set('fruits', 'apple') True >>> r.setnx('k1', 'v1') True >>> r.mset(k2='v2', k3='v3') True >>> r.get('fruits') b'apple' >>> r.get('k1') b'v1' >>> r.mget('k2','k3') [b'v2', b'v3'] >>> r.getrange('fruits',1,4) b'pole' >>> r.strlen('fruits') 5
类 Fabric 主机管理程序开发:
银角大王:Python之路【第九篇】:Python操做 RabbitMQ、Redis、Memcache、SQLAlchemy
金角大王:
其余:Redis 数据类型详解