Commit d7fa1e76 authored by Howard Chu's avatar Howard Chu
Browse files

partial revert - keep acquiring BDB lock in lru_purge.

parent b8309b4c
......@@ -127,9 +127,10 @@ typedef struct bdb_cache {
int c_minfree;
int c_eiused; /* EntryInfo's in use */
int c_leaves; /* EntryInfo leaf nodes */
u_int32_t c_locker; /* used by lru cleaner */
EntryInfo c_dntree;
EntryInfo *c_eifree; /* free list */
Avlnode *c_idtree;
Avlnode *c_idtree;
EntryInfo *c_lruhead; /* lru - add accessed entries here */
EntryInfo *c_lrutail; /* lru - rem lru entries from here */
ldap_pvt_thread_rdwr_t c_rwlock;
......
......@@ -556,6 +556,7 @@ int hdb_cache_load(
static void
bdb_cache_lru_purge( struct bdb_info *bdb )
{
DB_LOCK lock, *lockp;
EntryInfo *elru, *elnext;
int count, islocked;
......@@ -568,6 +569,12 @@ bdb_cache_lru_purge( struct bdb_info *bdb )
return;
}
if ( bdb->bi_cache.c_locker ) {
lockp = &lock;
} else {
lockp = NULL;
}
count = 0;
/* Look for an unused entry to remove */
for (elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
......@@ -592,33 +599,43 @@ bdb_cache_lru_purge( struct bdb_info *bdb )
continue;
}
/* entryinfo is locked */
islocked = 1;
/* Free entry for this node if it's present */
if ( elru->bei_e ) {
elru->bei_e->e_private = NULL;
/* If we can successfully writelock it, then
* the object is idle.
*/
if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {
/* Free entry for this node if it's present */
if ( elru->bei_e ) {
elru->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
#else
bdb_entry_return( elru->bei_e );
bdb_entry_return( elru->bei_e );
#endif
elru->bei_e = NULL;
count++;
}
/* ITS#4010 if we're in slapcat, and this node is a leaf
* node, free it.
*
* FIXME: we need to do this for slapd as well, (which is
* why we compute bi_cache.c_leaves now) but at the moment
* we can't because it causes unresolvable deadlocks.
*/
if ( slapMode & SLAP_TOOL_READONLY ) {
if ( !elru->bei_kids ) {
bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
islocked = 0;
elru->bei_e = NULL;
count++;
}
bdb_cache_entry_dbunlock( bdb, lockp );
/* ITS#4010 if we're in slapcat, and this node is a leaf
* node, free it.
*
* FIXME: we need to do this for slapd as well, (which is
* why we compute bi_cache.c_leaves now) but at the moment
* we can't because it causes unresolvable deadlocks.
*/
if ( slapMode & SLAP_TOOL_READONLY ) {
if ( !elru->bei_kids ) {
bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
islocked = 0;
}
/* Leave node on LRU list for a future pass */
}
/* Leave node on LRU list for a future pass */
}
if ( islocked )
......
......@@ -423,6 +423,10 @@ bdb_db_open( BackendDB *be )
goto fail;
}
if ( !quick ) {
XLOCK_ID(bdb->bi_dbenv, &bdb->bi_cache.c_locker);
}
/* monitor setup */
rc = bdb_monitor_db_open( be );
if ( rc != 0 ) {
......@@ -486,6 +490,12 @@ bdb_db_close( BackendDB *be )
/* close db environment */
if( bdb->bi_dbenv ) {
/* Free cache locker if we enabled locking */
if ( !( slapMode & SLAP_TOOL_QUICK )) {
XLOCK_ID_FREE(bdb->bi_dbenv, bdb->bi_cache.c_locker);
bdb->bi_cache.c_locker = 0;
}
/* force a checkpoint, but not if we were ReadOnly,
* and not in Quick mode since there are no transactions there.
*/
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment