OpenLDAP commit a720011c
Authored Dec 31, 2006 by Howard Chu
Ditch LRU cache replacement in favor of 2nd-chance/clock.
Much better concurrency.
parent ff93c6be
Showing 3 changed files with 192 additions and 208 deletions:
servers/slapd/back-bdb/back-bdb.h   +5   -3
servers/slapd/back-bdb/cache.c      +183 -195
servers/slapd/back-bdb/init.c       +4   -10
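The gist of the change, sketched outside the patch: under second-chance/clock replacement, touching a cached entry only sets a reference flag, so no list links move and no list lock is taken on the hot path; a purge sweep walks a circular list, clears the flag on the first encounter and evicts on the second. A minimal illustrative sketch follows (hypothetical types and names, not the slapd code):

#include <stddef.h>

/* Hypothetical cache node on a circular doubly-linked ring,
 * mirroring the bei_lrunext/bei_lruprev/REFERENCED scheme. */
typedef struct node {
	struct node *next, *prev;
	int referenced;     /* set on access; cleared by the sweep */
	int in_use;         /* pinned entries are skipped */
} node;

typedef struct clock_cache {
	node *head;         /* the clock hand */
	node *tail;         /* new nodes are appended here */
} clock_cache;

/* On access: just set a flag. No list manipulation, no list lock. */
static void cache_touch( node *n )
{
	n->referenced = 1;
}

/* Purge sweep: advance the hand around the ring. A referenced node
 * loses its mark and survives; an unmarked, unpinned node is the
 * victim (unlinking it is the caller's job). NULL after max_steps. */
static node *cache_find_victim( clock_cache *c, int max_steps )
{
	node *n = c->head;
	int i;

	for ( i = 0; n && i < max_steps; i++, n = n->next ) {
		if ( n->in_use )
			continue;
		if ( n->referenced ) {
			n->referenced = 0;	/* second chance */
			continue;
		}
		c->head = n->next;	/* park the hand past the victim */
		return n;
	}
	return NULL;
}

In the diff below, CACHE_ENTRY_REFERENCED plays the role of the referenced flag, and c_lruhead serves as the clock hand, left parked wherever the last sweep stopped.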
servers/slapd/back-bdb/back-bdb.h  (view file @ a720011c)

@@ -94,6 +94,7 @@ typedef struct bdb_entry_info {
 #define CACHE_ENTRY_LOADING	0x10
 #define CACHE_ENTRY_WALKING	0x20
 #define CACHE_ENTRY_ONELEVEL	0x40
+#define CACHE_ENTRY_REFERENCED	0x80

 	/*
 	 * remaining fields require backend cache lock to access
@@ -121,8 +122,8 @@ typedef struct bdb_entry_info {
 /* for the in-core cache of entries */
 typedef struct bdb_cache {
 	int		c_maxsize;
 	int		c_cursize;
 	int		c_minfree;
 	int		c_eiused;	/* EntryInfo's in use */
 	int		c_leaves;	/* EntryInfo leaf nodes */
@@ -134,7 +135,8 @@ typedef struct bdb_cache {
 	ldap_pvt_thread_rdwr_t c_rwlock;
 	ldap_pvt_thread_mutex_t lru_head_mutex;
 	ldap_pvt_thread_mutex_t lru_tail_mutex;
-	u_int32_t	c_locker;	/* used by lru cleaner */
+	ldap_pvt_thread_mutex_t c_count_mutex;
+	ldap_pvt_thread_mutex_t c_eifree_mutex;
 #ifdef SLAP_ZONE_ALLOC
 	void *c_zctx;
 #endif
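The struct change trades the lru cleaner's c_locker for two narrow mutexes, so the size counter and the EntryInfo free list each get their own lock instead of riding on c_rwlock. A rough pthreads analogy of that lock-splitting, with hypothetical names rather than the actual slapd types:

#include <pthread.h>

/* Hypothetical analogue of struct bdb_cache's locking layout:
 * one lock per independent piece of state, so a thread bumping
 * the size counter never contends with one recycling EntryInfos. */
struct split_cache {
	pthread_rwlock_t  tree_lock;    /* c_rwlock: the EntryInfo tree */
	pthread_mutex_t   count_lock;   /* c_count_mutex: c_cursize */
	pthread_mutex_t   free_lock;    /* c_eifree_mutex: the free list */
	int               cursize;
	void             *free_list;
};

static int split_cache_init( struct split_cache *c )
{
	int rc = pthread_rwlock_init( &c->tree_lock, NULL );
	if ( !rc ) rc = pthread_mutex_init( &c->count_lock, NULL );
	if ( !rc ) rc = pthread_mutex_init( &c->free_lock, NULL );
	c->cursize = 0;
	c->free_list = NULL;
	return rc;
}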
servers/slapd/back-bdb/cache.c  (view file @ a720011c)

@@ -29,9 +29,9 @@
 #include "ldap_rq.h"

 #ifdef BDB_HIER
-#define bdb_cache_lru_add	hdb_cache_lru_add
+#define bdb_cache_lru_purge	hdb_cache_lru_purge
 #endif
-static void bdb_cache_lru_add( struct bdb_info *bdb, EntryInfo *ei );
+static void bdb_cache_lru_purge( struct bdb_info *bdb );

 static int	bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
 #ifdef LDAP_DEBUG
@@ -40,30 +40,83 @@ static void bdb_lru_print(Cache *cache);
 #endif
 #endif

+/* For concurrency experiments only! */
+#if 0
+#define ldap_pvt_thread_rdwr_wlock(a)	0
+#define ldap_pvt_thread_rdwr_wunlock(a)	0
+#define ldap_pvt_thread_rdwr_rlock(a)	0
+#define ldap_pvt_thread_rdwr_runlock(a)	0
+#endif
+
+#if 0
+#define ldap_pvt_thread_mutex_trylock(a)	0
+#endif
+
 static EntryInfo *
 bdb_cache_entryinfo_new( Cache *cache )
 {
 	EntryInfo *ei = NULL;

 	if ( cache->c_eifree ) {
-		ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
+		ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
 		if ( cache->c_eifree ) {
 			ei = cache->c_eifree;
 			cache->c_eifree = ei->bei_lrunext;
 		}
-		ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
+		ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
 	}
-	if ( ei ) {
-		ei->bei_lrunext = NULL;
-		ei->bei_state = 0;
-	} else {
-		ei = ch_calloc(1, sizeof(struct bdb_entry_info));
+	if ( !ei ) {
+		ei = ch_calloc(1, sizeof(EntryInfo));
 		ldap_pvt_thread_mutex_init( &ei->bei_kids_mutex );
 	}
+
+	ei->bei_state = CACHE_ENTRY_REFERENCED;

 	return ei;
 }
+/* Note - we now use a Second-Chance / Clock algorithm instead of
+ * Least-Recently-Used. This tremendously improves concurrency
+ * because we no longer need to manipulate the lists every time an
+ * entry is touched. We only need to lock the lists when adding
+ * or deleting an entry. It's now a circular doubly-linked list.
+ * We always append to the tail, but the head traverses the circle
+ * during a purge operation.
+ */
+static void
+bdb_cache_lru_link( Cache *cache, EntryInfo *ei )
+{
+	/* Insert into circular LRU list */
+	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
+	ei->bei_lruprev = cache->c_lrutail;
+	if ( cache->c_lrutail ) {
+		ei->bei_lrunext = cache->c_lrutail->bei_lrunext;
+		cache->c_lrutail->bei_lrunext = ei;
+		if ( ei->bei_lrunext )
+			ei->bei_lrunext->bei_lruprev = ei;
+	} else {
+		ei->bei_lrunext = ei->bei_lruprev = ei;
+		cache->c_lruhead = ei;
+	}
+	cache->c_lrutail = ei;
+	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
+}
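The two branches of bdb_cache_lru_link (splice after the tail vs. a first node that points at itself) can be exercised in isolation. A small self-contained sketch of the same append logic, using a toy node type rather than EntryInfo:

#include <assert.h>
#include <stddef.h>

typedef struct ring { struct ring *next, *prev; } ring;

/* Same shape as bdb_cache_lru_link: append n after *tail. */
static void ring_append( ring **head, ring **tail, ring *n )
{
	n->prev = *tail;
	if ( *tail ) {
		n->next = (*tail)->next;
		(*tail)->next = n;
		if ( n->next )
			n->next->prev = n;
	} else {
		n->next = n->prev = n;	/* first node closes the circle */
		*head = n;
	}
	*tail = n;
}

int main( void )
{
	ring a, b, c, *head = NULL, *tail = NULL;
	ring_append( &head, &tail, &a );
	ring_append( &head, &tail, &b );
	ring_append( &head, &tail, &c );
	/* circle: a -> b -> c -> a, and back again via prev */
	assert( head == &a && tail == &c );
	assert( a.next == &b && b.next == &c && c.next == &a );
	assert( a.prev == &c && c.prev == &b && b.prev == &a );
	return 0;
}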
+#ifdef NO_THREADS
+#define NO_DB_LOCK
+#endif
+
+/* #define NO_DB_LOCK 1 */
+/* Note: The BerkeleyDB locks are much slower than regular
+ * mutexes or rdwr locks. But the BDB implementation has the
+ * advantage of using a fixed size lock table, instead of
+ * allocating a lock object per entry in the DB. That's a
+ * key benefit for scaling. It also frees us from worrying
+ * about undetectable deadlocks between BDB activity and our
+ * own cache activity. It's still worth exploring faster
+ * alternatives though.
+ */
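For context on what the wrappers below this note do, here is a hedged sketch of the BerkeleyDB 4.x lock-manager calls involved; the function names and the DBT "object" encoding are assumptions for illustration, and environment setup and error handling are omitted:

#include <db.h>

/* Sketch: take and release a write lock on an "object" (here a
 * byte string standing in for an entry ID) through BDB's fixed
 * size lock table. Assumes env was opened with DB_INIT_LOCK and
 * locker came from env->lock_id(). */
static int lock_entry_write( DB_ENV *env, u_int32_t locker,
	void *id, size_t idlen, DB_LOCK *lock )
{
	DBT obj = { 0 };

	obj.data = id;
	obj.size = (u_int32_t) idlen;

	/* DB_LOCK_NOWAIT gives trylock semantics, as the cache's
	 * tryOnly path uses; omit it to block until granted. */
	return env->lock_get( env, locker, DB_LOCK_NOWAIT, &obj,
		DB_LOCK_WRITE, lock );
}

static int unlock_entry( DB_ENV *env, DB_LOCK *lock )
{
	return env->lock_put( env, lock );
}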
 /* Atomically release and reacquire a lock */
 int
 bdb_cache_entry_db_relock(
@@ -74,7 +127,7 @@ bdb_cache_entry_db_relock(
 	int tryOnly,
 	DB_LOCK *lock )
 {
-#ifdef NO_THREADS
+#ifdef NO_DB_LOCK
 	return 0;
 #else
 	int	rc;
@@ -110,7 +163,7 @@ static int
 bdb_cache_entry_db_lock( DB_ENV *env, u_int32_t locker, EntryInfo *ei,
 	int rw, int tryOnly, DB_LOCK *lock )
 {
-#ifdef NO_THREADS
+#ifdef NO_DB_LOCK
 	return 0;
 #else
 	int	rc;
@@ -135,13 +188,13 @@ bdb_cache_entry_db_lock( DB_ENV *env, u_int32_t locker, EntryInfo *ei,
 			ei->bei_id, rw, rc );
 	}
 	return rc;
-#endif /* NO_THREADS */
+#endif /* NO_DB_LOCK */
 }

 int
 bdb_cache_entry_db_unlock ( DB_ENV *env, DB_LOCK *lock )
 {
-#ifdef NO_THREADS
+#ifdef NO_DB_LOCK
 	return 0;
 #else
 	int	rc;
@@ -165,34 +218,6 @@ bdb_cache_entryinfo_destroy( EntryInfo *e )
 	return 0;
 }

-#define LRU_DELETE( cache, ei ) do { \
-	if ( (ei)->bei_lruprev != NULL ) { \
-		(ei)->bei_lruprev->bei_lrunext = (ei)->bei_lrunext; \
-	} else { \
-		(cache)->c_lruhead = (ei)->bei_lrunext; \
-	} \
-	if ( (ei)->bei_lrunext != NULL ) { \
-		(ei)->bei_lrunext->bei_lruprev = (ei)->bei_lruprev; \
-	} else { \
-		(cache)->c_lrutail = (ei)->bei_lruprev; \
-	} \
-	(ei)->bei_lrunext = (ei)->bei_lruprev = NULL; \
-} while(0)
-
-#define LRU_ADD( cache, ei ) do { \
-	(ei)->bei_lrunext = (cache)->c_lruhead; \
-	if ( (ei)->bei_lrunext != NULL ) { \
-		(ei)->bei_lrunext->bei_lruprev = (ei); \
-	} \
-	(cache)->c_lruhead = (ei); \
-	(ei)->bei_lruprev = NULL; \
-	if ( !ldap_pvt_thread_mutex_trylock( &(cache)->lru_tail_mutex )) { \
-		if ( (cache)->c_lrutail == NULL ) \
-			(cache)->c_lrutail = (ei); \
-		ldap_pvt_thread_mutex_unlock( &(cache)->lru_tail_mutex ); \
-	} \
-} while(0)
-
 /* Do a length-ordered sort on normalized RDNs */
 static int
 bdb_rdn_cmp( const void *v_e1, const void *v_e2 )
@@ -267,6 +292,7 @@ bdb_entryinfo_add_internal(
 		ei->bei_parent->bei_ckids++;
 #endif
 	}
+	bdb_cache_lru_link( &bdb->bi_cache, ei2 );

 	*res = ei2;
 	return 0;
@@ -315,6 +341,7 @@ bdb_cache_find_ndn(
 	}

 	for ( bdb_cache_entryinfo_lock( eip ); eip; ) {
+		eip->bei_state |= CACHE_ENTRY_REFERENCED;
 		ei.bei_parent = eip;
 		ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
 		if ( !ei2 ) {
@@ -392,7 +419,6 @@ hdb_cache_find_parent(
 	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
 	EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
 	int rc;
-	int addlru = 0;

 	ei.bei_id = id;
 	ei.bei_kids = NULL;
@@ -418,7 +444,7 @@ hdb_cache_find_parent(
 		ei.bei_ckids = 0;

 		/* This node is not fully connected yet */
-		ein->bei_state = CACHE_ENTRY_NOT_LINKED;
+		ein->bei_state |= CACHE_ENTRY_NOT_LINKED;

 		/* Insert this node into the ID tree */
 		ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
@@ -440,8 +466,8 @@ hdb_cache_find_parent(
 			ein->bei_ckids++;
 			bdb_cache_entryinfo_unlock( ein );
-		}
-		addlru = 0;
+		} else {
+			bdb_cache_lru_link( &bdb->bi_cache, ein );
+		}
 	}

 		/* If this is the first time, save this node
@@ -464,25 +490,21 @@ hdb_cache_find_parent(
 		bdb->bi_cache.c_leaves++;
 		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

-		if ( addlru ) {
-			ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
-			bdb_cache_lru_add( bdb, ein );
-		}
-		addlru = 1;

 		/* Got the parent, link in and we're done. */
 		if ( ei2 ) {
 			bdb_cache_entryinfo_lock( ei2 );
 			ein->bei_parent = ei2;
+			/* Reset all the state info */
+			for (ein = eir; ein != ei2; ein=ein->bei_parent)
+				ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;
 			avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
 				avl_dup_error );
 			ei2->bei_ckids++;
 			bdb_cache_entryinfo_unlock( ei2 );
 			bdb_cache_entryinfo_lock( eir );
-			/* Reset all the state info */
-			for (ein = eir; ein != ei2; ein=ein->bei_parent)
-				ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;
 			*res = eir;
 			break;
 		}
@@ -531,94 +553,87 @@ int hdb_cache_load(
 }
 #endif

-/* caller must have lru_head_mutex locked. mutex
- * will be unlocked on return.
- */
 static void
-bdb_cache_lru_add(
-	struct bdb_info *bdb,
-	EntryInfo *ei )
+bdb_cache_lru_purge( struct bdb_info *bdb )
 {
-	DB_LOCK		lock, *lockp;
-	EntryInfo *elru, *elprev;
-	int count = 0;
-
-	LRU_ADD( &bdb->bi_cache, ei );
-	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
-
-	/* See if we're above the cache size limit */
-	if ( bdb->bi_cache.c_cursize <= bdb->bi_cache.c_maxsize )
-		return;
-
-	if ( bdb->bi_cache.c_locker ) {
-		lockp = &lock;
-	} else {
-		lockp = NULL;
-	}
-
-	/* Don't bother if we can't get the lock */
-	if ( ldap_pvt_thread_mutex_trylock( &bdb->bi_cache.lru_tail_mutex ) )
-		return;
-
-	/* Look for an unused entry to remove */
-	for ( elru = bdb->bi_cache.c_lrutail; elru; elru = elprev ) {
-		elprev = elru->bei_lruprev;
-
-		/* If we can successfully writelock it, then
-		 * the object is idle.
-		 */
-		if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
-			bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {
-
-			/* If this node is in the process of linking into the cache,
-			 * or this node is being deleted, skip it.
-			 */
-			if ( elru->bei_state &
-				( CACHE_ENTRY_NOT_LINKED | CACHE_ENTRY_DELETED )) {
-				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
-				continue;
-			}
-			/* Free entry for this node if it's present */
-			if ( elru->bei_e ) {
-				elru->bei_e->e_private = NULL;
-#ifdef SLAP_ZONE_ALLOC
-				bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
-#else
-				bdb_entry_return( elru->bei_e );
-#endif
-				elru->bei_e = NULL;
-				count++;
-			}
-			/* ITS#4010 if we're in slapcat, and this node is a leaf
-			 * node, free it.
-			 *
-			 * FIXME: we need to do this for slapd as well, (which is
-			 * why we compute bi_cache.c_leaves now) but at the moment
-			 * we can't because it causes unresolvable deadlocks.
-			 */
-			if ( slapMode & SLAP_TOOL_READONLY ) {
-				if ( !elru->bei_kids ) {
-					/* This does LRU_DELETE for us */
-					bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
-					bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
-				}
-				/* Leave node on LRU list for a future pass */
-			} else {
-				LRU_DELETE( &bdb->bi_cache, elru );
-			}
-			bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
-
-			if ( count >= bdb->bi_cache.c_minfree ) {
-				ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
-				bdb->bi_cache.c_cursize -= count;
-				ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
-				break;
-			}
-		}
-	}
-
-	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
+	EntryInfo *elru, *elnext;
+	int count, islocked;
+
+	/* Don't bother if we can't get the lock */
+	if ( ldap_pvt_thread_mutex_trylock( &bdb->bi_cache.lru_head_mutex ) )
+		return;
+
+	if ( bdb->bi_cache.c_cursize <= bdb->bi_cache.c_maxsize ) {
+		ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
+		return;
+	}
+
+	count = 0;
+	/* Look for an unused entry to remove */
+	for ( elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
+		elnext = elru->bei_lrunext;
+
+		if ( ldap_pvt_thread_mutex_trylock( &elru->bei_kids_mutex ))
+			continue;
+
+		/* This flag implements the clock replacement behavior */
+		if ( elru->bei_state & ( CACHE_ENTRY_REFERENCED )) {
+			elru->bei_state &= ~CACHE_ENTRY_REFERENCED;
+			bdb_cache_entryinfo_unlock( elru );
+			continue;
+		}
+
+		/* If this node is in the process of linking into the cache,
+		 * or this node is being deleted, skip it.
+		 */
+		if ( elru->bei_state & ( CACHE_ENTRY_NOT_LINKED |
+			CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING )) {
+			bdb_cache_entryinfo_unlock( elru );
+			continue;
+		}
+
+		islocked = 1;
+
+		/* Free entry for this node if it's present */
+		if ( elru->bei_e ) {
+			elru->bei_e->e_private = NULL;
+#ifdef SLAP_ZONE_ALLOC
+			bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
+#else
+			bdb_entry_return( elru->bei_e );
+#endif
+			elru->bei_e = NULL;
+			count++;
+		}
+		/* ITS#4010 if we're in slapcat, and this node is a leaf
+		 * node, free it.
+		 *
+		 * FIXME: we need to do this for slapd as well, (which is
+		 * why we compute bi_cache.c_leaves now) but at the moment
+		 * we can't because it causes unresolvable deadlocks.
+		 */
+		if ( slapMode & SLAP_TOOL_READONLY ) {
+			if ( !elru->bei_kids ) {
+				bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
+				bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
+				islocked = 0;
+			}
+			/* Leave node on LRU list for a future pass */
+		}
+
+		if ( islocked )
+			bdb_cache_entryinfo_unlock( elru );
+
+		if ( count >= bdb->bi_cache.c_minfree ) {
+			ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
+			bdb->bi_cache.c_cursize -= count;
+			ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
+			break;
+		}
+	}
+
+	bdb->bi_cache.c_lruhead = elru;
+	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
 }
 EntryInfo *
@@ -822,31 +837,17 @@ load1:
 #endif
 	}
 	if ( rc == 0 ) {
+		int purge = 0;

 		if ( load ) {
-			ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
+			ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
 			bdb->bi_cache.c_cursize++;
-			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
-		}
-
-		ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
-
-		/* If the LRU list has only one entry and this is it, it
-		 * doesn't need to be added again.
-		 */
-		if ( bdb->bi_cache.c_lruhead == bdb->bi_cache.c_lrutail &&
-			bdb->bi_cache.c_lruhead == *eip ) {
-			ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
-		} else {
-			/* if entry is on LRU list, remove from old spot */
-			if ( (*eip)->bei_lrunext || (*eip)->bei_lruprev ) {
-				ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );
-				LRU_DELETE( &bdb->bi_cache, *eip );
-				ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
-			}
-			/* lru_head_mutex is unlocked for us */
-			bdb_cache_lru_add( bdb, *eip );
-		}
+			if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize )
+				purge = 1;
+			ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
+		}
+		if ( purge )
+			bdb_cache_lru_purge( bdb );
 	}

 #ifdef SLAP_ZONE_ALLOC
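The shape of the hunk above (and of the bdb_cache_add hunk that follows) is the same: update the counter under a narrow mutex, record only a purge flag while holding it, and run the sweep after dropping the lock. A minimal pthreads sketch of the pattern, with hypothetical names:

#include <pthread.h>

struct counted_cache {
	pthread_mutex_t count_lock;
	int cursize, maxsize;
};

/* Bump the size under the narrow lock, but never call the
 * (slow, lock-hungry) purge while holding it. */
static void cache_added_one( struct counted_cache *c,
	void (*purge_fn)( struct counted_cache * ) )
{
	int purge = 0;

	pthread_mutex_lock( &c->count_lock );
	c->cursize++;
	if ( c->cursize > c->maxsize )
		purge = 1;
	pthread_mutex_unlock( &c->count_lock );

	if ( purge )
		purge_fn( c );	/* sweep runs outside count_lock */
}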
@@ -890,7 +891,7 @@ bdb_cache_add(
 	DB_LOCK	*lock )
 {
 	EntryInfo *new, ei;
-	int rc;
+	int rc, purge = 0;
 #ifdef BDB_HIER
 	struct berval rdn = e->e_name;
 #endif
@@ -931,21 +932,22 @@ bdb_cache_add(
 	}
 	new->bei_e = e;
 	e->e_private = new;
-	new->bei_state = CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
+	new->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
 	eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
 	if ( eip->bei_parent ) {
 		eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
 	}
 	bdb_cache_entryinfo_unlock( eip );

-	++bdb->bi_cache.c_cursize;
 	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
+	++bdb->bi_cache.c_cursize;
+	if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize )
+		purge = 1;
+	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );

-	/* set lru mutex */
-	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
+	if ( purge )