PooledDB parameters in detail

```python
from DBUtils.PooledDB import PooledDB
import pymysql

self.__pool = PooledDB(creator=pymysql,
                       mincached=1,        # initial number of idle connections in the pool
                       maxcached=4,        # maximum number of idle connections kept in the pool
                       maxconnections=4,   # maximum number of connections allowed in total
                       blocking=True,      # True: block and wait until a connection is free; False (default): raise an error
                       ping=1,             # 1 (default): check the connection with ping() whenever it is fetched from the pool
                       host=self.host,
                       port=self.port,
                       user=self.user,
                       passwd=self.passwd,
                       db=self.db_name,
                       charset=self.charset)
```
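
Once the pool exists, a connection is checked out with connection() and handed back with close(). A minimal sketch of the pattern (here `pool` stands for the PooledDB instance created above, and the query is just a placeholder):

```python
conn = pool.connection()   # check a connection out of the pool
cur = conn.cursor()
cur.execute("SELECT 1")
print(cur.fetchone())
cur.close()
conn.close()               # not really closed: returned to the pool for reuse
```
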
- mincached: the initial number of idle connections in the pool; 0 or None (default) means no connections are opened when the pool is created. At first glance the source (and the experiment at the end of this post) suggests the parameter has no effect, but see the note inside the snippet below.
```python
# PooledDB.py source, around line 267
idle = [self.dedicated_connection() for i in range(mincached)]
while idle:
    idle.pop().close()
# The pool does create mincached connections here and then calls close() on each of them.
# However, close() on a pooled connection does not close the underlying connection: it hands
# it to cache(), which appends it to _idle_cache (see the cache()/close() excerpt further below).
# So the pool does start out with mincached idle connections; it is only the _connections
# counter, which tracks checked-out connections, that stays at 0.
```
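
A quick way to see where those connections end up is to look at the pool's internals right after construction. This is only a sketch; host, user and password are placeholders, and `_idle_cache`/`_connections` are private attributes used here purely for inspection:

```python
from DBUtils.PooledDB import PooledDB
import pymysql

pool = PooledDB(creator=pymysql, mincached=2, maxcached=4,
                host="127.0.0.1", port=3306, user="root",
                passwd="secret", db="test", charset="utf8")

print(len(pool._idle_cache))   # expected: 2 -- the mincached connections sit in the idle cache
print(pool._connections)       # expected: 0 -- only checked-out connections are counted here
```
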
- maxcached: the maximum number of idle connections kept in the pool; 0 or None (default) means the idle cache is not limited in size.
- maxshared: the maximum number of shared connections; 0 or None (default) means all connections are dedicated.
- maxconnections: the maximum number of connections allowed in total; 0 or None (default) means no limit. Note that maxcached and maxshared raise the effective value, as the snippet and the small example below show.
```python
# PooledDB.py source, around line 255
if maxconnections:
    if maxconnections < maxcached:
        maxconnections = maxcached
    if maxconnections < maxshared:
        maxconnections = maxshared
    self._maxconnections = maxconnections
else:
    self._maxconnections = 0
# maxcached and maxshared both raise the floor of maxconnections, i.e. the effective limit is
# max(maxconnections, maxcached, maxshared) whenever maxconnections is non-zero.
```
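
To make the interaction concrete, here is the same calculation applied to the values used in the experiment at the end of this post (maxconnections=2, maxcached=4, maxshared=3):

```python
maxconnections, maxcached, maxshared = 2, 4, 3
if maxconnections:
    if maxconnections < maxcached:
        maxconnections = maxcached
    if maxconnections < maxshared:
        maxconnections = maxshared
print(maxconnections)   # 4 -- which is why four connections can be opened before the fifth fails
```
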
```python
# PooledDB.py source, around line 356
# When a connection is handed back to the pool:
#   if there is no idle-cache limit, or the idle cache is not yet full, any open transaction
#   is rolled back and the connection is put into the idle cache;
#   otherwise the connection is really closed.
def cache(self, con):
    """Put a dedicated connection back into the idle cache."""
    self._lock.acquire()
    try:
        if not self._maxcached or len(self._idle_cache) < self._maxcached:
            con._reset(force=self._reset)  # rollback possible transaction
            # the idle cache is not full, so put it there
            self._idle_cache.append(con)  # append it to the idle cache
        else:  # if the idle cache is already full,
            con.close()  # then close the connection
        self._connections -= 1
        self._lock.notify()
    finally:
        self._lock.release()

# cache() is what the pooled connection's close() calls:
def close(self):
    """Close the pooled dedicated connection."""
    # Instead of actually closing the connection,
    # return it to the pool for future reuse.
    if self._con:
        self._pool.cache(self._con)
        self._con = None
```
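
A small sketch of that round trip, with the same imports and placeholder connection settings as above; the private attributes are again used only to observe what happens:

```python
pool = PooledDB(creator=pymysql, maxcached=4, maxconnections=4,
                host="127.0.0.1", port=3306, user="root",
                passwd="secret", db="test", charset="utf8")

conn = pool.connection()
print(pool._connections, len(pool._idle_cache))   # 1 checked out, 0 idle
conn.close()                                      # goes through cache() shown above
print(pool._connections, len(pool._idle_cache))   # 0 checked out, 1 idle
```
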
- blocking: with True, when no connection is available the call blocks and waits; with False it raises an error immediately. Default is False.
- maxusage: the maximum number of reuses of a single connection; 0 or None (default) means unlimited reuse. When the limit is reached, the connection is reset (see the sketch after the snippet).
```python
# SteadyDB.py source, around line 483
if self._maxusage:
    if self._usage >= self._maxusage:
        # the connection was used too often
        raise self._failure
cursor = self._con.cursor(*args, **kwargs)  # try to get a cursor
```
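
A minimal sketch of how maxusage is typically set (the limit counts database operations such as cursor creations and executions; once it is reached the connection is reset, as noted above). Connection settings are placeholders:

```python
pool = PooledDB(creator=pymysql, maxusage=1000,   # reset each raw connection after ~1000 operations
                host="127.0.0.1", port=3306, user="root",
                passwd="secret", db="test", charset="utf8")

conn = pool.connection()
cur = conn.cursor()
cur.execute("SELECT 1")   # every execution counts toward _usage
cur.close()
conn.close()
```
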
- setsession: an optional list of SQL commands used to prepare the session; they are executed right after the connection is established (see the sketch below the snippet).
```python
# SteadyDB.py source, around line 298
def _setsession(self, con=None):
    """Execute the SQL commands for session preparation."""
    if con is None:
        con = self._con
    if self._setsession_sql:
        cursor = con.cursor()
        for sql in self._setsession_sql:
            cursor.execute(sql)
        cursor.close()
```
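
A sketch of passing session-preparation statements (the statements themselves are only examples; use whatever your application needs), with placeholder connection settings:

```python
pool = PooledDB(creator=pymysql,
                setsession=["SET autocommit = 1",
                            "SET SESSION time_zone = '+08:00'"],
                host="127.0.0.1", port=3306, user="root",
                passwd="secret", db="test", charset="utf8")
```
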
- reset: how a connection is reset when it is returned to the pool. Default is True: every connection is rolled back on return, to be on the safe side. With False, only transactions that were explicitly started with begin() are rolled back (self._transaction is set to True only inside begin()).
```python
# PooledDB.py: cache() is called when a connection is handed back to the pool
def cache(self, con):
    """Put a dedicated connection back into the idle cache."""
    self._lock.acquire()
    try:
        if not self._maxcached or len(self._idle_cache) < self._maxcached:
            con._reset(force=self._reset)  # rollback possible transaction
            # the idle cache is not full, so put it there
            self._idle_cache.append(con)  # append it to the idle cache
        else:  # if the idle cache is already full,
            con.close()  # then close the connection
        self._connections -= 1
        self._lock.notify()
    finally:
        self._lock.release()

# SteadyDB.py: _reset() and begin() on the underlying steady connection
def _reset(self, force=False):
    """Reset a tough connection.

    Rollback if forced or the connection was in a transaction.
    """
    if not self._closed and (force or self._transaction):
        try:
            self.rollback()
        except Exception:
            pass

def begin(self, *args, **kwargs):
    """Indicate the beginning of a transaction.

    During a transaction, connections won't be transparently
    replaced, and all errors will be raised to the application.
    If the underlying driver supports this method, it will be called
    with the given parameters (e.g. for distributed transactions).
    """
    self._transaction = True
    try:
        begin = self._con.begin
    except AttributeError:
        pass
    else:
        begin(*args, **kwargs)
```
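
A sketch of what the reset=False mode implies for application code: transactions are opened explicitly with begin() and finished before the connection goes back to the pool. Table name and connection settings are placeholders:

```python
pool = PooledDB(creator=pymysql, reset=False,
                host="127.0.0.1", port=3306, user="root",
                passwd="secret", db="test", charset="utf8")

conn = pool.connection()
conn.begin()                    # sets _transaction = True on the underlying steady connection
cur = conn.cursor()
cur.execute("UPDATE t SET x = x + 1 WHERE id = 1")
conn.commit()                   # commit (or rollback) explicitly before handing it back
cur.close()
conn.close()                    # returned via cache(); no forced rollback since reset=False
```
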
- failures: an optional exception class or tuple of exception classes to extend the failover handling, in case the default (OperationalError, InternalError) is not adequate for the driver in use.
```python
# SteadyDB.py: the configured failure classes are what the "tough" methods catch
except self._failures as error:
```
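
A sketch of widening that set for pymysql; whether this is necessary depends on which errors your driver actually raises when a connection dies, so treat the tuple as an illustration only:

```python
from DBUtils.PooledDB import PooledDB
import pymysql

pool = PooledDB(creator=pymysql,
                failures=(pymysql.err.OperationalError,
                          pymysql.err.InternalError,
                          pymysql.err.InterfaceError),
                host="127.0.0.1", port=3306, user="root",
                passwd="secret", db="test", charset="utf8")
```
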
- ping: per the official description, determines when the connection is checked with ping(): 0 = None = never, 1 = default = whenever it is fetched from the pool, 2 = whenever a cursor is created, 4 = whenever a query is executed, 7 = always, plus any other bit combination of these values. At first sight the source only seems to test whether the value is non-zero, but the check is actually a bitwise AND (`if ping & self._ping:` below) against the trigger value passed in by each caller, so the individual bits do matter.
```python
# SteadyDB.py
def _ping_check(self, ping=1, reconnect=True):
    """Check whether the connection is still alive using ping().

    If the underlying connection is not active and the ping
    parameter is set accordingly, the connection will be recreated
    unless the connection is currently inside a transaction.
    """
    if ping & self._ping:
        try:  # if possible, ping the connection
            alive = self._con.ping()
        except (AttributeError, IndexError, TypeError, ValueError):
            self._ping = 0  # ping() is not available
            alive = None
            reconnect = False
        except Exception:
            alive = False
        else:
            if alive is None:
                alive = True
            if alive:
                reconnect = False
        if reconnect and not self._transaction:
            try:  # try to reopen the connection
                con = self._create()
            except Exception:
                pass
            else:
                self._close()
                self._store(con)
                alive = True
        return alive
```
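
A sketch of a stricter ping configuration (7 = 1 | 2 | 4, i.e. check on every trigger), with placeholder connection settings as before:

```python
pool = PooledDB(creator=pymysql, ping=7,
                host="127.0.0.1", port=3306, user="root",
                passwd="secret", db="test", charset="utf8")

conn = pool.connection()   # connections taken from the idle cache go through _ping_check(1)
cur = conn.cursor()        # cursor creation goes through _ping_check(2)
cur.execute("SELECT 1")    # query execution goes through _ping_check(4)
```
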
Usage
```python
def start_conn(self):
    try:
        # maxshared: maximum number of shared connections; 0/None (default) means all
        #            connections are dedicated. When a thread closes a connection that is
        #            no longer shared, it goes back to the idle cache for reuse.
        # mincached: initial number of idle connections (see the discussion above).
        self.__pool = PooledDB(creator=pymysql,
                               mincached=1,        # initial idle connections
                               maxcached=4,        # maximum idle connections in the pool
                               maxshared=3,        # maximum shared connections
                               maxconnections=2,   # maximum connections allowed (raised to 4, see above)
                               blocking=False,     # False: raise an error instead of blocking and waiting
                               host=self.host,
                               port=self.port,
                               user=self.user,
                               passwd=self.passwd,
                               db=self.db_name,
                               charset=self.charset)
        print("0 start_conn connections: %s" % self.__pool._connections)
        self.conn = self.__pool.connection()
        print('connect success')
        print("1 start_conn connections: %s" % self.__pool._connections)
        self.conn2 = self.__pool.connection()
        print("2 start_conn connections: %s" % self.__pool._connections)
        db3 = self.__pool.connection()
        print("3 start_conn connections: %s" % self.__pool._connections)
        db4 = self.__pool.connection()
        print("4 start_conn connections: %s" % self.__pool._connections)
        db5 = self.__pool.connection()
        print("5 start_conn connections: %s" % self.__pool._connections)
        # self.conn.close()
        print("6 start_conn connections: %s" % self.__pool._connections)
        return True
    except Exception:
        print('connect failed')
        return False
```
0 start_conn connections: 0
connect success
1 start_conn connections: 1
2 start_conn connections: 2
3 start_conn connections: 3
4 start_conn connections: 4
connect failed
Comparing the program above with its output makes the parameters easier to understand:
- Right after the pool is created the counter reads 0. As discussed under mincached, this does not by itself mean the parameter is ignored: _connections only counts checked-out connections, while the mincached connection sits in _idle_cache.
- The effective limit is max(maxconnections, maxcached, maxshared) = max(2, 4, 3) = 4, which matches the output: four connections can be opened in a row, and the fifth attempt fails. (Note also that PooledDB disables sharing when the driver's threadsafety is below 2, as is the case for pymysql, so all five requests here end up asking for dedicated connections.)
- With blocking=True, the final "connect failed" line does not appear; instead the call blocks and waits for a connection to become free. Since nothing in this example ever closes an existing connection, it would wait forever.
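
As a small variation (still with placeholder connection settings), closing one of the checked-out connections before requesting a fifth frees a slot, so the extra request succeeds even with blocking=False:

```python
pool = PooledDB(creator=pymysql, maxcached=4, maxconnections=4, blocking=False,
                host="127.0.0.1", port=3306, user="root",
                passwd="secret", db="test", charset="utf8")

conns = [pool.connection() for _ in range(4)]
print(pool._connections)    # 4 -- the effective maxconnections limit is reached

conns[0].close()            # hand one connection back to the pool
print(pool._connections)    # 3

extra = pool.connection()   # succeeds: reuses the idle connection that was just returned
print(pool._connections)    # 4
```
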