In software development we constantly have to manage all kinds of "connection" resources, and we usually do it with a matching connection pool: MySQL connections can be managed with the pool in SQLAlchemy, Thrift connections with thriftpool, redis-py's StrictRedis is itself built on a connection pool, and so on. socketpool, the subject of this post, is a generic Python connection pool library that can manage connections of any type. It is not perfect, but when you cannot find a suitable pool implementation and do not want to roll your own, it saves a great deal of effort.
The ConnectionPool initializer:
def __init__(self, factory,
             retry_max=3, retry_delay=.1,
             timeout=-1, max_lifetime=600.,
             max_size=10, options=None,
             reap_connections=True, reap_delay=1,
             backend="thread"):

    if isinstance(backend, str):
        self.backend_mod = load_backend(backend)
        self.backend = backend
    else:
        self.backend_mod = backend
        self.backend = str(getattr(backend, '__name__', backend))
    self.max_size = max_size
    self.pool = getattr(self.backend_mod, 'PriorityQueue')()
    self._free_conns = 0
    self.factory = factory
    self.retry_max = retry_max
    self.retry_delay = retry_delay
    self.timeout = timeout
    self.max_lifetime = max_lifetime
    if options is None:
        self.options = {"backend_mod": self.backend_mod,
                        "pool": self}
    else:
        self.options = options
        self.options["backend_mod"] = self.backend_mod
        self.options["pool"] = self

    # bounded semaphore to make self._alive 'safe'
    self._sem = self.backend_mod.Semaphore(1)

    self._reaper = None
    if reap_connections:
        self.reap_delay = reap_delay
        self.start_reaper()
The meanings of the parameters, as far as the code above shows:

- factory: the Connector class (or callable) used to create new connections; it is called with the merged options as keyword arguments.
- retry_max / retry_delay: how many times to retry obtaining a usable connection, and how long to wait between attempts.
- max_lifetime: how long, in seconds, an idle connection may live before the reaper considers it expired.
- max_size: the maximum number of connections kept in the pool.
- options: extra keyword arguments forwarded to the factory; backend_mod and pool are always injected into it.
- reap_connections / reap_delay: whether to start the reaper and how often it runs.
- backend: "thread" by default; gevent or eventlet backends can be loaded by name instead.
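As an illustration (a sketch; the host and port are placeholders, and TcpConnector, shown further down, is socketpool's built-in TCP factory), a pool might be constructed like this:

from socketpool import ConnectionPool, TcpConnector

pool = ConnectionPool(
    factory=TcpConnector,                         # Connector class used to build conns
    options={'host': '127.0.0.1', 'port': 6000},  # forwarded to the factory
    max_size=10,          # keep at most 10 connections
    max_lifetime=600.,    # reap connections older than 10 minutes
    retry_max=3,          # try up to 3 times to obtain a usable conn
    reap_connections=True,
    reap_delay=1,
    backend="thread")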
The reaper that gets started here is a separate thread that periodically calls the method below to reclaim expired connections:
def murder_connections(self):
    current_pool_size = self.pool.qsize()
    if current_pool_size > 0:
        for priority, candidate in self.pool:
            current_pool_size -= 1
            if not self.too_old(candidate):
                self.pool.put((priority, candidate))
            else:
                self._reap_connection(candidate)
            if current_pool_size <= 0:
                break
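The too_old check used here (and in release_connection below) is not quoted in this post; conceptually it just measures how far in the past the connector's get_lifetime() timestamp is. A minimal sketch of the idea, not necessarily the library's exact code:

import time

def too_old(self, conn):
    # a connection counts as expired once its lifetime timestamp
    # is more than max_lifetime seconds in the past
    return time.time() - conn.get_lifetime() > self.max_lifetime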
_reap_connection ultimately calls back into the conn object's invalidate method (part of the Connector interface) to destroy it. After a conn has been used, release_connection is called; its logic is:
def release_connection(self, conn):
    if self._reaper is not None:
        self._reaper.ensure_started()

    with self._sem:
        if self.pool.qsize() < self.max_size:
            connected = conn.is_connected()
            if connected and not self.too_old(conn):
                self.pool.put((conn.get_lifetime(), conn))
            else:
                self._reap_connection(conn)
        else:
            self._reap_connection(conn)
If the connection has not expired or been disconnected, it is put back into the priority queue. By implementing the Connector interface's get_lifetime, users can control the priority of the conn being returned here; the conn with the smallest priority is handed out first next time.
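The priority is simply the first element of the (get_lifetime(), conn) tuple that release_connection puts into the queue, so the connection with the oldest lifetime timestamp comes out first. A quick illustration using the standard library's PriorityQueue (which the thread backend builds on); the string labels are just placeholders for connection objects:

import queue
import time

q = queue.PriorityQueue()
now = time.time()

# release_connection stores (priority, conn) tuples;
# an older connection has a smaller lifetime timestamp
q.put((now - 30, "conn created 30s ago"))
q.put((now - 5, "conn created 5s ago"))

print(q.get())  # the 30s-old entry, i.e. the smallest priority, comes out first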
So which interfaces does Connector define?
class Connector(object):

    def matches(self, **match_options):
        raise NotImplementedError()

    def is_connected(self):
        raise NotImplementedError()

    def handle_exception(self, exception):
        raise NotImplementedError()

    def get_lifetime(self):
        raise NotImplementedError()

    def invalidate(self):
        raise NotImplementedError()
The matches method is mainly used when the pool takes out a conn: besides preferring the conn with the smallest priority, the conn also has to match the arguments passed to get(**options), and that match is done by calling back the conn's matches method. The other interfaces have all come up above.
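ConnectionPool.get itself is not quoted above. Roughly, it scans the queued connections from smallest priority upwards, reaps the dead or expired ones, puts back the ones that do not match, and falls back to the factory when nothing usable is left, retrying up to retry_max times. A simplified sketch of that flow (not the library's exact code):

import time

def get(self, **options):
    options.update(self.options)          # injects backend_mod and pool
    for _ in range(self.retry_max):
        unmatched = []
        size = self.pool.qsize()
        # scan what is currently queued, smallest priority first
        while size > 0:
            priority, candidate = self.pool.get()
            size -= 1
            if self.too_old(candidate) or not candidate.is_connected():
                self._reap_connection(candidate)
            elif candidate.matches(**options):
                for item in unmatched:    # put the non-matching ones back
                    self.pool.put(item)
                return candidate
            else:
                unmatched.append((priority, candidate))
        for item in unmatched:
            self.pool.put(item)
        # nothing usable was pooled: ask the factory for a new connection
        try:
            return self.factory(**options)
        except Exception:
            time.sleep(self.retry_delay)
    raise RuntimeError("could not get a connection after %d tries"
                       % self.retry_max)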
Let's look at the TcpConnector implementation bundled with socketpool, which serves as the factory for TCP sockets:
class TcpConnector(Connector):

    def __init__(self, host, port, backend_mod, pool=None):
        self._s = backend_mod.Socket(socket.AF_INET, socket.SOCK_STREAM)
        self._s.connect((host, port))
        self.host = host
        self.port = port
        self.backend_mod = backend_mod
        self._connected = True
        # use a 'jiggle' value to make sure there is some
        # randomization to expiry, to avoid many conns expiring very
        # closely together.
        self._life = time.time() - random.randint(0, 10)
        self._pool = pool

    def __del__(self):
        self.release()

    def matches(self, **match_options):
        target_host = match_options.get('host')
        target_port = match_options.get('port')
        return target_host == self.host and target_port == self.port

    def is_connected(self):
        if self._connected:
            return util.is_connected(self._s)
        return False

    def handle_exception(self, exception):
        print('got an exception')
        print(str(exception))

    def get_lifetime(self):
        return self._life

    def invalidate(self):
        self._s.close()
        self._connected = False
        self._life = -1

    def release(self):
        if self._pool is not None:
            if self._connected:
                self._pool.release_connection(self)
            else:
                self._pool = None

    def send(self, data):
        return self._s.send(data)

    def recv(self, size=1024):
        return self._s.recv(size)
It needs little extra explanation.
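A quick usage sketch (the address is a placeholder for a TCP service you can actually reach): take a connection out of the pool, talk over it, then hand it back:

from socketpool import ConnectionPool, TcpConnector

pool = ConnectionPool(factory=TcpConnector,
                      options={'host': '127.0.0.1', 'port': 6000})

conn = pool.get(host='127.0.0.1', port=6000)   # matched via TcpConnector.matches
try:
    conn.send(b"ping\n")
    print(conn.recv(1024))
finally:
    conn.release()   # puts a still-connected conn back into the pool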
For my own project's needs, I implemented a Hive connection pool with pyhs2:
import logging
import random
import time

import pyhs2

from socketpool import ConnectionPool, Connector

logger = logging.getLogger(__name__)


class HiveConnector(Connector):

    def __init__(self, host, port, backend_mod, pool=None,
                 authMechanism='NOSASL', **options):
        self.host = host
        self.port = port
        self.backend_mod = backend_mod
        self._pool = pool
        self._connected = False
        self._conn = pyhs2.connect(host=host,
                                   port=port,
                                   authMechanism=authMechanism)
        self._connected = True
        # use a 'jiggle' value to make sure there is some
        # randomization to expiry, to avoid many conns expiring very
        # closely together.
        self._life = time.time() - random.randint(0, 10)

    def __del__(self):
        self.release()

    def matches(self, **match_options):
        target_host = match_options.get('host')
        target_port = match_options.get('port')
        return target_host == self.host and target_port == self.port

    def is_connected(self):
        return self._connected

    def handle_exception(self, exception):
        logger.exception("error: %s" % str(exception))

    def get_lifetime(self):
        return self._life

    def invalidate(self):
        try:
            self._conn.close()
        except Exception:
            pass
        finally:
            self._connected = False
            self._life = -1

    def release(self):
        if self._pool is not None:
            if self._connected:
                self._pool.release_connection(self)
            else:
                self._pool = None

    def cursor(self):
        return self._conn.cursor()

    def execute(self, hql):
        with self.cursor() as cur:
            return cur.execute(hql)


hive_pool = ConnectionPool(factory=HiveConnector, **HIVE_CONNECTOR_CONFIG)
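HIVE_CONNECTOR_CONFIG above is project-specific; it is just the keyword arguments forwarded to ConnectionPool. A hypothetical example (host, port and sizes are placeholders for your own HiveServer2 setup):

# hypothetical values -- adapt to your own deployment
HIVE_CONNECTOR_CONFIG = {
    'options': {            # forwarded to the HiveConnector factory
        'host': '10.0.0.1',
        'port': 10000,
        'authMechanism': 'NOSASL',
    },
    'max_size': 10,         # at most 10 pooled hive connections
    'max_lifetime': 600.,   # reap connections idle longer than 10 minutes
    'retry_max': 3,
}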
Executing HQL statements with this hive_pool is very easy:
with hive_pool.connection() as conn:
    with conn.cursor() as cur:
        print(cur.getDatabases())
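Running a real query works the same way; pyhs2 cursors expose execute() and fetch() (a sketch with a placeholder table name):

with hive_pool.connection() as conn:
    with conn.cursor() as cur:
        cur.execute("select * from some_table limit 10")  # placeholder hql
        for row in cur.fetch():
            print(row)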
This post has walked through socketpool's internals and how to use it to build your own connection pool.