Skip to content

Commit adcaf53

Browse files
authored
Merge pull request #7 from qaspen-python/feature/connection_recycling
Added connection recycling settings
2 parents 8c1dca8 + 9d086a3 commit adcaf53

File tree

8 files changed

+137
-5
lines changed

8 files changed

+137
-5
lines changed

README.md

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,31 @@ async def main() -> None:
8080
# rust does it instead.
8181
```
8282

83+
### Control connection recycling
84+
There are 3 available options to control how a connection is recycled - `Fast`, `Verified` and `Clean`.
85+
As a connection can be closed in different situations on either side, you can select the preferred behavior for how a connection is recycled.
86+
87+
- `Fast`: Only run `is_closed()` when recycling existing connections.
88+
- `Verified`: Run `is_closed()` and execute a test query. This is slower, but guarantees that the database connection is ready to
89+
be used. Normally, `is_closed()` should be enough to filter
90+
out bad connections, but under some circumstances (i.e. hard-closed
91+
network connections) it's possible that `is_closed()`
92+
returns `false` while the connection is dead. You will receive an error
93+
on your first query then.
94+
- `Clean`: Like the `Verified` method, but instead uses the following sequence of statements, which guarantees a pristine connection:
95+
```sql
96+
CLOSE ALL;
97+
SET SESSION AUTHORIZATION DEFAULT;
98+
RESET ALL;
99+
UNLISTEN *;
100+
SELECT pg_advisory_unlock_all();
101+
DISCARD TEMP;
102+
DISCARD SEQUENCES;
103+
```
104+
This is similar to calling `DISCARD ALL`, but doesn't call
105+
`DEALLOCATE ALL` and `DISCARD PLAN`, so that the statement cache is not
106+
rendered ineffective.
107+
83108
## Query parameters
84109
You can pass parameters into queries.
85110
Parameters can be passed in any `execute` method as the second parameter, it must be a list.

python/psqlpy/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
from ._internal import (
22
Connection,
3+
ConnRecyclingMethod,
34
Cursor,
45
IsolationLevel,
56
PSQLPool,
@@ -16,4 +17,5 @@
1617
"ReadVariant",
1718
"Connection",
1819
"Cursor",
20+
"ConnRecyclingMethod",
1921
]

python/psqlpy/_internal/__init__.pyi

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,49 @@ class ReadVariant(Enum):
2424
ReadOnly = 1
2525
ReadWrite = 2
2626

27+
class ConnRecyclingMethod(Enum):
28+
"""Possible methods of how a connection is recycled.
29+
30+
The default is [`Fast`] which does not check the connection health or
31+
perform any clean-up queries.
32+
33+
# Description:
34+
## Fast:
35+
Only run [`is_closed()`] when recycling existing connections.
36+
37+
Unless you have special needs this is a safe choice.
38+
39+
## Verified:
40+
Run [`is_closed()`] and execute a test query.
41+
42+
This is slower, but guarantees that the database connection is ready to
43+
be used. Normally, [`is_closed()`] should be enough to filter
44+
out bad connections, but under some circumstances (i.e. hard-closed
45+
network connections) it's possible that [`is_closed()`]
46+
returns `false` while the connection is dead. You will receive an error
47+
on your first query then.
48+
49+
## Clean:
50+
Like the [`Verified`] method, but instead uses the following sequence
51+
of statements which guarantees a pristine connection:
52+
```sql
53+
CLOSE ALL;
54+
SET SESSION AUTHORIZATION DEFAULT;
55+
RESET ALL;
56+
UNLISTEN *;
57+
SELECT pg_advisory_unlock_all();
58+
DISCARD TEMP;
59+
DISCARD SEQUENCES;
60+
```
61+
This is similar to calling `DISCARD ALL`, but doesn't call
62+
`DEALLOCATE ALL` and `DISCARD PLAN`, so that the statement cache is not
63+
rendered ineffective.
64+
"""
65+
66+
Fast = 1
67+
Verified = 2
68+
Clean = 3
69+
2770
class Cursor:
2871
"""Represent opened cursor in a transaction.
2972
@@ -446,6 +489,7 @@ class PSQLPool:
446489
port: Optional[int] = None,
447490
db_name: Optional[str] = None,
448491
max_db_pool_size: Optional[str] = None,
492+
conn_recycling_method: Optional[ConnRecyclingMethod] = None,
449493
) -> None:
450494
"""Create new PostgreSQL connection pool.
451495
@@ -468,6 +512,7 @@ class PSQLPool:
468512
- `port`: port of postgres
469513
- `db_name`: name of the database in postgres
470514
- `max_db_pool_size`: maximum size of the connection pool
515+
- `conn_recycling_method`: how a connection is recycled.
471516
"""
472517
async def startup(self: Self) -> None:
473518
"""Startup the connection pool.

python/tests/test_connection_pool.py

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import pytest
22

3-
from psqlpy import Connection, PSQLPool, QueryResult
3+
from psqlpy import Connection, ConnRecyclingMethod, PSQLPool, QueryResult
44

55

66
@pytest.mark.anyio
@@ -39,3 +39,25 @@ async def test_pool_connection(
3939
"""Test that PSQLPool can return single connection from the pool."""
4040
connection = await psql_pool.connection()
4141
assert isinstance(connection, Connection)
42+
43+
44+
@pytest.mark.anyio
45+
@pytest.mark.parametrize(
46+
"conn_recycling_method",
47+
[
48+
ConnRecyclingMethod.Fast,
49+
ConnRecyclingMethod.Verified,
50+
ConnRecyclingMethod.Clean,
51+
],
52+
)
53+
async def test_pool_conn_recycling_method(
54+
conn_recycling_method: ConnRecyclingMethod,
55+
) -> None:
56+
pg_pool = PSQLPool(
57+
dsn="postgres://postgres:postgres@localhost:5432/psqlpy_test",
58+
conn_recycling_method=conn_recycling_method,
59+
)
60+
61+
await pg_pool.startup()
62+
63+
await pg_pool.execute("SELECT 1")

src/driver/common_options.rs

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
use deadpool_postgres::RecyclingMethod;
2+
use pyo3::pyclass;
3+
4+
#[pyclass]
5+
#[derive(Clone, Copy)]
6+
pub enum ConnRecyclingMethod {
7+
Fast,
8+
Verified,
9+
Clean,
10+
}
11+
12+
impl ConnRecyclingMethod {
13+
#[must_use]
14+
pub fn to_internal(&self) -> RecyclingMethod {
15+
match self {
16+
ConnRecyclingMethod::Fast => RecyclingMethod::Fast,
17+
ConnRecyclingMethod::Verified => RecyclingMethod::Verified,
18+
ConnRecyclingMethod::Clean => RecyclingMethod::Clean,
19+
}
20+
}
21+
}

src/driver/connection_pool.rs

Lines changed: 19 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ use crate::{
1010
value_converter::{convert_parameters, PythonDTO},
1111
};
1212

13-
use super::connection::Connection;
13+
use super::{common_options::ConnRecyclingMethod, connection::Connection};
1414

1515
/// `PSQLPool` for internal use only.
1616
///
@@ -23,12 +23,14 @@ pub struct RustPSQLPool {
2323
port: Option<u16>,
2424
db_name: Option<String>,
2525
max_db_pool_size: Option<usize>,
26+
conn_recycling_method: Option<ConnRecyclingMethod>,
2627
db_pool: Arc<tokio::sync::RwLock<Option<Pool>>>,
2728
}
2829

2930
impl RustPSQLPool {
3031
/// Create new `RustPSQLPool`.
3132
#[must_use]
33+
#[allow(clippy::too_many_arguments)]
3234
pub fn new(
3335
dsn: Option<String>,
3436
username: Option<String>,
@@ -37,6 +39,7 @@ impl RustPSQLPool {
3739
port: Option<u16>,
3840
db_name: Option<String>,
3941
max_db_pool_size: Option<usize>,
42+
conn_recycling_method: Option<ConnRecyclingMethod>,
4043
) -> Self {
4144
RustPSQLPool {
4245
dsn,
@@ -46,6 +49,7 @@ impl RustPSQLPool {
4649
port,
4750
db_name,
4851
max_db_pool_size,
52+
conn_recycling_method,
4953
db_pool: Arc::new(tokio::sync::RwLock::new(None)),
5054
}
5155
}
@@ -124,6 +128,7 @@ impl RustPSQLPool {
124128
let db_host = self.host.clone();
125129
let db_port = self.port;
126130
let db_name = self.db_name.clone();
131+
let conn_recycling_method = self.conn_recycling_method;
127132
let max_db_pool_size = self.max_db_pool_size;
128133

129134
let mut db_pool_guard = db_pool_arc.write().await;
@@ -163,9 +168,16 @@ impl RustPSQLPool {
163168
}
164169
}
165170

166-
let mgr_config = ManagerConfig {
167-
recycling_method: RecyclingMethod::Fast,
168-
};
171+
let mgr_config: ManagerConfig;
172+
if let Some(conn_recycling_method) = conn_recycling_method {
173+
mgr_config = ManagerConfig {
174+
recycling_method: conn_recycling_method.to_internal(),
175+
}
176+
} else {
177+
mgr_config = ManagerConfig {
178+
recycling_method: RecyclingMethod::Fast,
179+
};
180+
}
169181
let mgr = Manager::from_config(pg_config, NoTls, mgr_config);
170182

171183
let mut db_pool_builder = Pool::builder(mgr);
@@ -186,6 +198,7 @@ pub struct PSQLPool {
186198
#[pymethods]
187199
impl PSQLPool {
188200
#[new]
201+
#[allow(clippy::too_many_arguments)]
189202
#[must_use]
190203
pub fn new(
191204
dsn: Option<String>,
@@ -195,6 +208,7 @@ impl PSQLPool {
195208
port: Option<u16>,
196209
db_name: Option<String>,
197210
max_db_pool_size: Option<usize>,
211+
conn_recycling_method: Option<ConnRecyclingMethod>,
198212
) -> Self {
199213
PSQLPool {
200214
rust_psql_pool: Arc::new(tokio::sync::RwLock::new(RustPSQLPool {
@@ -205,6 +219,7 @@ impl PSQLPool {
205219
port,
206220
db_name,
207221
max_db_pool_size,
222+
conn_recycling_method,
208223
db_pool: Arc::new(tokio::sync::RwLock::new(None)),
209224
})),
210225
}

src/driver/mod.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
pub mod common_options;
12
pub mod connection;
23
pub mod connection_pool;
34
pub mod cursor;

src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ fn psqlpy(py: Python<'_>, pymod: &PyModule) -> PyResult<()> {
1919
pymod.add_class::<driver::cursor::Cursor>()?;
2020
pymod.add_class::<driver::transaction_options::IsolationLevel>()?;
2121
pymod.add_class::<driver::transaction_options::ReadVariant>()?;
22+
pymod.add_class::<driver::common_options::ConnRecyclingMethod>()?;
2223
pymod.add_class::<query_result::PSQLDriverPyQueryResult>()?;
2324
add_module(py, pymod, "extra_types", extra_types_module)?;
2425
add_module(py, pymod, "exceptions", python_exceptions_module)?;

0 commit comments

Comments
 (0)