LCOV - code coverage report
Current view: top level - source3/lib - g_lock.c (source / functions)
Test: coverage report for abartlet/fix-coverage dd10fb34
Date: 2021-09-23 10:06:22
Lines: 461 of 598 hit (77.1 %)
Functions: 37 of 40 hit (92.5 %)

          Line data    Source code
       1             : /*
       2             :    Unix SMB/CIFS implementation.
       3             :    global locks based on dbwrap and messaging
       4             :    Copyright (C) 2009 by Volker Lendecke
       5             : 
       6             :    This program is free software; you can redistribute it and/or modify
       7             :    it under the terms of the GNU General Public License as published by
       8             :    the Free Software Foundation; either version 3 of the License, or
       9             :    (at your option) any later version.
      10             : 
      11             :    This program is distributed in the hope that it will be useful,
      12             :    but WITHOUT ANY WARRANTY; without even the implied warranty of
      13             :    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      14             :    GNU General Public License for more details.
      15             : 
      16             :    You should have received a copy of the GNU General Public License
      17             :    along with this program.  If not, see <http://www.gnu.org/licenses/>.
      18             : */
      19             : 
      20             : #include "replace.h"
      21             : #include "system/filesys.h"
      22             : #include "lib/util/server_id.h"
      23             : #include "lib/util/debug.h"
      24             : #include "lib/util/talloc_stack.h"
      25             : #include "lib/util/samba_util.h"
      26             : #include "lib/util_path.h"
      27             : #include "dbwrap/dbwrap.h"
      28             : #include "dbwrap/dbwrap_open.h"
      29             : #include "dbwrap/dbwrap_watch.h"
      30             : #include "g_lock.h"
      31             : #include "util_tdb.h"
      32             : #include "../lib/util/tevent_ntstatus.h"
      33             : #include "messages.h"
      34             : #include "serverid.h"
      35             : 
      36             : struct g_lock_ctx {
      37             :         struct db_context *db;
      38             :         struct messaging_context *msg;
      39             :         enum dbwrap_lock_order lock_order;
      40             : };
      41             : 
      42             : struct g_lock {
      43             :         struct server_id exclusive;
      44             :         size_t num_shared;
      45             :         uint8_t *shared;
      46             :         uint64_t unique_data_epoch;
      47             :         size_t datalen;
      48             :         uint8_t *data;
      49             : };
      50             : 
      51     8483145 : static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
      52             : {
      53             :         struct server_id exclusive;
      54             :         size_t num_shared, shared_len;
      55             :         uint64_t unique_data_epoch;
      56             : 
      57     8483145 :         if (buflen < (SERVER_ID_BUF_LENGTH + /* exclusive */
      58             :                       sizeof(uint64_t) +     /* seqnum */
      59             :                       sizeof(uint32_t))) {   /* num_shared */
      60      854046 :                 struct g_lock ret = {
      61             :                         .exclusive.pid = 0,
      62      427023 :                         .unique_data_epoch = generate_unique_u64(0),
      63             :                 };
      64      427023 :                 *lck = ret;
      65      426296 :                 return true;
      66             :         }
      67             : 
      68     8056122 :         server_id_get(&exclusive, buf);
      69     8056122 :         buf += SERVER_ID_BUF_LENGTH;
      70     8056122 :         buflen -= SERVER_ID_BUF_LENGTH;
      71             : 
      72     8056122 :         unique_data_epoch = BVAL(buf, 0);
      73     8056122 :         buf += sizeof(uint64_t);
      74     8056122 :         buflen -= sizeof(uint64_t);
      75             : 
      76     8056122 :         num_shared = IVAL(buf, 0);
      77     8056122 :         buf += sizeof(uint32_t);
      78     8056122 :         buflen -= sizeof(uint32_t);
      79             : 
      80     8056122 :         if (num_shared > buflen/SERVER_ID_BUF_LENGTH) {
      81           0 :                 DBG_DEBUG("num_shared=%zu, buflen=%zu\n",
      82             :                           num_shared,
      83             :                           buflen);
      84           0 :                 return false;
      85             :         }
      86             : 
      87     8056122 :         shared_len = num_shared * SERVER_ID_BUF_LENGTH;
      88             : 
      89     8056122 :         *lck = (struct g_lock) {
      90             :                 .exclusive = exclusive,
      91             :                 .num_shared = num_shared,
      92             :                 .shared = buf,
      93             :                 .unique_data_epoch = unique_data_epoch,
      94     8056122 :                 .datalen = buflen-shared_len,
      95     8056122 :                 .data = buf+shared_len,
      96             :         };
      97             : 
      98     8056122 :         return true;
      99             : }
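                     :
                     : /*
                     :  * A sketch of the record layout g_lock_parse() decodes, derived
                     :  * from the reads above (BVAL/IVAL read little-endian):
                     :  *
                     :  *   exclusive holder    SERVER_ID_BUF_LENGTH bytes
                     :  *   unique_data_epoch   8 bytes (uint64_t)
                     :  *   num_shared          4 bytes (uint32_t)
                     :  *   shared holders      num_shared * SERVER_ID_BUF_LENGTH bytes
                     :  *   user data           all remaining bytes
                     :  *
                     :  * A buffer shorter than the fixed header parses as "no lock held,
                     :  * no data", with a freshly generated unique_data_epoch.
                     :  */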
     100             : 
     101           2 : static void g_lock_get_shared(const struct g_lock *lck,
     102             :                               size_t i,
     103             :                               struct server_id *shared)
     104             : {
     105           7 :         if (i >= lck->num_shared) {
     106           0 :                 abort();
     107             :         }
     108          72 :         server_id_get(shared, lck->shared + i*SERVER_ID_BUF_LENGTH);
     109           2 : }
     110             : 
     111          19 : static void g_lock_del_shared(struct g_lock *lck, size_t i)
     112             : {
     113          19 :         if (i >= lck->num_shared) {
     114           0 :                 abort();
     115             :         }
     116          19 :         lck->num_shared -= 1;
     117          19 :         if (i < lck->num_shared) {
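                     :                 /* Backfill slot i with the former last entry; order is not kept. */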
     118           4 :                 memcpy(lck->shared + i*SERVER_ID_BUF_LENGTH,
     119           4 :                        lck->shared + lck->num_shared*SERVER_ID_BUF_LENGTH,
     120             :                        SERVER_ID_BUF_LENGTH);
     121             :         }
     122          19 : }
     123             : 
     124     3540987 : static NTSTATUS g_lock_store(
     125             :         struct db_record *rec,
     126             :         struct g_lock *lck,
     127             :         struct server_id *new_shared,
     128             :         const TDB_DATA *new_dbufs,
     129             :         size_t num_new_dbufs)
     130     3540987 : {
     131             :         uint8_t exclusive[SERVER_ID_BUF_LENGTH];
     132             :         uint8_t seqnum_buf[sizeof(uint64_t)];
     133             :         uint8_t sizebuf[sizeof(uint32_t)];
     134             :         uint8_t new_shared_buf[SERVER_ID_BUF_LENGTH];
     135             : 
     136     3540987 :         struct TDB_DATA dbufs[6 + num_new_dbufs];
     137             : 
     138     3540987 :         dbufs[0] = (TDB_DATA) {
     139             :                 .dptr = exclusive, .dsize = sizeof(exclusive),
     140             :         };
     141     3540987 :         dbufs[1] = (TDB_DATA) {
     142             :                 .dptr = seqnum_buf, .dsize = sizeof(seqnum_buf),
     143             :         };
     144     3540987 :         dbufs[2] = (TDB_DATA) {
     145             :                 .dptr = sizebuf, .dsize = sizeof(sizebuf),
     146             :         };
     147     3540987 :         dbufs[3] = (TDB_DATA) {
     148     3540987 :                 .dptr = lck->shared,
     149     3540987 :                 .dsize = lck->num_shared * SERVER_ID_BUF_LENGTH,
     150             :         };
     151     3540987 :         dbufs[4] = (TDB_DATA) { 0 };
     152     3540987 :         dbufs[5] = (TDB_DATA) {
     153     3540987 :                 .dptr = lck->data, .dsize = lck->datalen,
     154             :         };
     155             : 
     156     3540987 :         if (num_new_dbufs != 0) {
     157     1762263 :                 memcpy(&dbufs[6],
     158             :                        new_dbufs,
     159             :                        num_new_dbufs * sizeof(TDB_DATA));
     160             :         }
     161             : 
     162     3540987 :         server_id_put(exclusive, lck->exclusive);
     163     3540987 :         SBVAL(seqnum_buf, 0, lck->unique_data_epoch);
     164             : 
     165     3540987 :         if (new_shared != NULL) {
     166          20 :                 if (lck->num_shared >= UINT32_MAX) {
     167           0 :                         return NT_STATUS_BUFFER_OVERFLOW;
     168             :                 }
     169             : 
     170          20 :                 server_id_put(new_shared_buf, *new_shared);
     171             : 
     172          20 :                 dbufs[4] = (TDB_DATA) {
     173             :                         .dptr = new_shared_buf,
     174             :                         .dsize = sizeof(new_shared_buf),
     175             :                 };
     176             : 
     177          20 :                 lck->num_shared += 1;
     178             :         }
     179             : 
     180     3540987 :         SIVAL(sizebuf, 0, lck->num_shared);
     181             : 
     182     3540987 :         return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
     183             : }
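                     :
                     : /*
                     :  * g_lock_store() writes the same layout back as a gather vector:
                     :  * dbufs[0..2] carry the fixed header (exclusive holder, epoch,
                     :  * num_shared), dbufs[3] the existing shared holders, dbufs[4] an
                     :  * optional new shared holder, and dbufs[5] plus any caller-supplied
                     :  * new_dbufs the user data.
                     :  */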
     184             : 
     185         298 : struct g_lock_ctx *g_lock_ctx_init_backend(
     186             :         TALLOC_CTX *mem_ctx,
     187             :         struct messaging_context *msg,
     188             :         struct db_context **backend)
     189             : {
     190             :         struct g_lock_ctx *result;
     191             : 
     192         298 :         result = talloc(mem_ctx, struct g_lock_ctx);
     193         298 :         if (result == NULL) {
     194           0 :                 return NULL;
     195             :         }
     196         298 :         result->msg = msg;
     197         298 :         result->lock_order = DBWRAP_LOCK_ORDER_NONE;
     198             : 
     199         298 :         result->db = db_open_watched(result, backend, msg);
     200         298 :         if (result->db == NULL) {
     201           0 :                 DBG_WARNING("db_open_watched failed\n");
     202           0 :                 TALLOC_FREE(result);
     203           0 :                 return NULL;
     204             :         }
     205         273 :         return result;
     206             : }
     207             : 
     208         151 : void g_lock_set_lock_order(struct g_lock_ctx *ctx,
     209             :                            enum dbwrap_lock_order lock_order)
     210             : {
     211         151 :         ctx->lock_order = lock_order;
     212         151 : }
     213             : 
     214         147 : struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
     215             :                                    struct messaging_context *msg)
     216             : {
     217         147 :         char *db_path = NULL;
     218         147 :         struct db_context *backend = NULL;
     219         147 :         struct g_lock_ctx *ctx = NULL;
     220             : 
     221         147 :         db_path = lock_path(mem_ctx, "g_lock.tdb");
     222         147 :         if (db_path == NULL) {
     223           0 :                 return NULL;
     224             :         }
     225             : 
     226         147 :         backend = db_open(
     227             :                 mem_ctx,
     228             :                 db_path,
     229             :                 0,
     230             :                 TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
     231             :                 O_RDWR|O_CREAT,
     232             :                 0600,
     233             :                 DBWRAP_LOCK_ORDER_3,
     234             :                 DBWRAP_FLAG_NONE);
     235         147 :         TALLOC_FREE(db_path);
     236         147 :         if (backend == NULL) {
     237           0 :                 DBG_WARNING("Could not open g_lock.tdb\n");
     238           0 :                 return NULL;
     239             :         }
     240             : 
     241         147 :         ctx = g_lock_ctx_init_backend(mem_ctx, msg, &backend);
     242         147 :         return ctx;
     243             : }
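                     :
                     : /*
                     :  * A minimal setup sketch (hypothetical caller; "msg" is assumed to
                     :  * be an already initialized messaging_context):
                     :  *
                     :  *      struct g_lock_ctx *ctx = g_lock_ctx_init(talloc_tos(), msg);
                     :  *      if (ctx == NULL) {
                     :  *              return NT_STATUS_NO_MEMORY;
                     :  *      }
                     :  */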
     244             : 
     245         570 : static NTSTATUS g_lock_cleanup_dead(
     246             :         struct db_record *rec,
     247             :         struct g_lock *lck,
     248             :         struct server_id *dead_blocker)
     249             : {
     250         570 :         bool modified = false;
     251             :         bool exclusive_died;
     252         570 :         NTSTATUS status = NT_STATUS_OK;
     253             :         struct server_id_buf tmp;
     254             : 
     255         570 :         if (dead_blocker == NULL) {
     256         566 :                 return NT_STATUS_OK;
     257             :         }
     258             : 
     259           4 :         exclusive_died = server_id_equal(dead_blocker, &lck->exclusive);
     260             : 
     261           4 :         if (exclusive_died) {
     262           1 :                 DBG_DEBUG("Exclusive holder %s died\n",
     263             :                           server_id_str_buf(lck->exclusive, &tmp));
     264           1 :                 lck->exclusive.pid = 0;
     265           1 :                 modified = true;
     266             :         }
     267             : 
     268           4 :         if (lck->num_shared != 0) {
     269             :                 bool shared_died;
     270             :                 struct server_id shared;
     271             : 
     272           6 :                 g_lock_get_shared(lck, 0, &shared);
     273           3 :                 shared_died = server_id_equal(dead_blocker, &shared);
     274             : 
     275           3 :                 if (shared_died) {
     276           3 :                         DBG_DEBUG("Shared holder %s died\n",
     277             :                                   server_id_str_buf(shared, &tmp));
     278           3 :                         g_lock_del_shared(lck, 0);
     279           3 :                         modified = true;
     280             :                 }
     281             :         }
     282             : 
     283           1 :         if (modified) {
     284           4 :                 status = g_lock_store(rec, lck, NULL, NULL, 0);
     285           4 :                 if (!NT_STATUS_IS_OK(status)) {
     286           0 :                         DBG_DEBUG("g_lock_store() failed: %s\n",
     287             :                                   nt_errstr(status));
     288             :                 }
     289             :         }
     290             : 
     291           4 :         return status;
     292             : }
     293             : 
     294         420 : static ssize_t g_lock_find_shared(
     295             :         struct g_lock *lck,
     296             :         const struct server_id *self)
     297             : {
     298             :         size_t i;
     299             : 
     300         427 :         for (i=0; i<lck->num_shared; i++) {
     301             :                 struct server_id shared;
     302             :                 bool same;
     303             : 
     304          22 :                 g_lock_get_shared(lck, i, &shared);
     305             : 
     306          11 :                 same = server_id_equal(self, &shared);
     307          11 :                 if (same) {
     308           4 :                         return i;
     309             :                 }
     310             :         }
     311             : 
     312         409 :         return -1;
     313             : }
     314             : 
     315          18 : static void g_lock_cleanup_shared(struct g_lock *lck)
     316             : {
     317             :         size_t i;
     318             :         struct server_id check;
     319             :         bool exists;
     320             : 
     321          18 :         if (lck->num_shared == 0) {
     322           5 :                 return;
     323             :         }
     324             : 
     325             :         /*
     326             :          * Read locks can stay around forever if the holding process
     327             :          * dies. Do a heuristic liveness check: pick one shared holder
     328             :          * at random and verify that it still exists. Hopefully this
     329             :          * keeps runaway read locks under control.
     330             :          */
     331          13 :         i = generate_random() % lck->num_shared;
     332          26 :         g_lock_get_shared(lck, i, &check);
     333             : 
     334          13 :         exists = serverid_exists(&check);
     335          13 :         if (!exists) {
     336             :                 struct server_id_buf tmp;
     337           5 :                 DBG_DEBUG("Shared locker %s died -- removing\n",
     338             :                           server_id_str_buf(check, &tmp));
     339           5 :                 g_lock_del_shared(lck, i);
     340             :         }
     341             : }
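                     :
                     : /*
                     :  * With one random holder probed per pass, a dead entry among n
                     :  * shared holders is caught with probability 1/n per call, so stale
                     :  * entries get reaped over time without scanning the whole list.
                     :  */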
     342             : 
     343             : struct g_lock_lock_state {
     344             :         struct tevent_context *ev;
     345             :         struct g_lock_ctx *ctx;
     346             :         TDB_DATA key;
     347             :         enum g_lock_type type;
     348             :         bool retry;
     349             : };
     350             : 
     351             : struct g_lock_lock_fn_state {
     352             :         struct g_lock_lock_state *req_state;
     353             :         struct server_id *dead_blocker;
     354             : 
     355             :         struct tevent_req *watch_req;
     356             :         NTSTATUS status;
     357             : };
     358             : 
     359             : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s);
     360             : 
     361         570 : static NTSTATUS g_lock_trylock(
     362             :         struct db_record *rec,
     363             :         struct g_lock_lock_fn_state *state,
     364             :         TDB_DATA data,
     365             :         struct server_id *blocker)
     366             : {
     367         570 :         struct g_lock_lock_state *req_state = state->req_state;
     368         570 :         struct server_id self = messaging_server_id(req_state->ctx->msg);
     369         570 :         enum g_lock_type type = req_state->type;
     370         570 :         bool retry = req_state->retry;
     371         570 :         struct g_lock lck = { .exclusive.pid = 0 };
     372             :         struct server_id_buf tmp;
     373             :         NTSTATUS status;
     374             :         bool ok;
     375             : 
     376         570 :         ok = g_lock_parse(data.dptr, data.dsize, &lck);
     377         570 :         if (!ok) {
     378           0 :                 DBG_DEBUG("g_lock_parse failed\n");
     379           0 :                 return NT_STATUS_INTERNAL_DB_CORRUPTION;
     380             :         }
     381             : 
     382         570 :         status = g_lock_cleanup_dead(rec, &lck, state->dead_blocker);
     383         570 :         if (!NT_STATUS_IS_OK(status)) {
     384           0 :                 DBG_DEBUG("g_lock_cleanup_dead() failed: %s\n",
     385             :                           nt_errstr(status));
     386           0 :                 return status;
     387             :         }
     388             : 
     389         570 :         if (lck.exclusive.pid != 0) {
     390         152 :                 bool self_exclusive = server_id_equal(&self, &lck.exclusive);
     391             : 
     392         152 :                 if (!self_exclusive) {
     393         148 :                         bool exists = serverid_exists(&lck.exclusive);
     394         148 :                         if (!exists) {
     395           0 :                                 lck.exclusive = (struct server_id) { .pid=0 };
     396           0 :                                 goto noexclusive;
     397             :                         }
     398             : 
     399         148 :                         DBG_DEBUG("%s has an exclusive lock\n",
     400             :                                   server_id_str_buf(lck.exclusive, &tmp));
     401             : 
     402         148 :                         if (type == G_LOCK_DOWNGRADE) {
     403             :                                 struct server_id_buf tmp2;
     404           0 :                                 DBG_DEBUG("%s: Trying to downgrade %s\n",
     405             :                                           server_id_str_buf(self, &tmp),
     406             :                                           server_id_str_buf(
     407             :                                                   lck.exclusive, &tmp2));
     408           0 :                                 return NT_STATUS_NOT_LOCKED;
     409             :                         }
     410             : 
     411         148 :                         if (type == G_LOCK_UPGRADE) {
     412             :                                 ssize_t shared_idx;
     413           1 :                                 shared_idx = g_lock_find_shared(&lck, &self);
     414             : 
     415           1 :                                 if (shared_idx == -1) {
     416           0 :                                         DBG_DEBUG("Trying to upgrade %s "
     417             :                                                   "without "
     418             :                                                   "existing shared lock\n",
     419             :                                                   server_id_str_buf(
     420             :                                                           self, &tmp));
     421           0 :                                         return NT_STATUS_NOT_LOCKED;
     422             :                                 }
     423             : 
     424             :                                 /*
     425             :                                  * We're trying to upgrade, and the
     426             :                                  * exclusive lock is taken by someone
     427             :                                  * else. This means that someone else
     428             :                                  * is waiting for us to give up our
     429             :                                  * shared lock. If we now also wait
     430             :                                  * for someone to give up their
     431             :                                  * shared lock, we will deadlock.
     432             :                                  */
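                     :                                 /*
                     :                                  * For example: A and B each
                     :                                  * hold a shared lock and
                     :                                  * both try G_LOCK_UPGRADE.
                     :                                  * A wins, takes the
                     :                                  * exclusive slot and waits
                     :                                  * for B's shared lock to go
                     :                                  * away; B would in turn
                     :                                  * wait for A. Failing here
                     :                                  * breaks that cycle.
                     :                                  */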
     433             : 
     434           1 :                                 DBG_DEBUG("Trying to upgrade %s while "
     435             :                                           "someone else is also "
     436             :                                           "trying to upgrade\n",
     437             :                                           server_id_str_buf(self, &tmp));
     438           1 :                                 return NT_STATUS_POSSIBLE_DEADLOCK;
     439             :                         }
     440             : 
     441         147 :                         DBG_DEBUG("Waiting for lck.exclusive=%s\n",
     442             :                                   server_id_str_buf(lck.exclusive, &tmp));
     443             : 
     444         147 :                         *blocker = lck.exclusive;
     445         147 :                         return NT_STATUS_LOCK_NOT_GRANTED;
     446             :                 }
     447             : 
     448           4 :                 if (type == G_LOCK_DOWNGRADE) {
     449           0 :                         DBG_DEBUG("Downgrading %s from WRITE to READ\n",
     450             :                                   server_id_str_buf(self, &tmp));
     451             : 
     452           0 :                         lck.exclusive = (struct server_id) { .pid = 0 };
     453           0 :                         goto do_shared;
     454             :                 }
     455             : 
     456           4 :                 if (!retry) {
     457           1 :                         DBG_DEBUG("%s already locked by self\n",
     458             :                                   server_id_str_buf(self, &tmp));
     459           1 :                         return NT_STATUS_WAS_LOCKED;
     460             :                 }
     461             : 
     462           3 :                 if (lck.num_shared != 0) {
     463           2 :                         g_lock_get_shared(&lck, 0, blocker);
     464             : 
     465           1 :                         DBG_DEBUG("Continue waiting for shared lock %s\n",
     466             :                                   server_id_str_buf(*blocker, &tmp));
     467             : 
     468           1 :                         return NT_STATUS_LOCK_NOT_GRANTED;
     469             :                 }
     470             : 
     471           2 :                 talloc_set_destructor(req_state, NULL);
     472             : 
     473             :                 /*
     474             :                  * Retry after a conflicting lock was released
     475             :                  */
     476           2 :                 return NT_STATUS_OK;
     477             :         }
     478             : 
     479         418 : noexclusive:
     480             : 
     481         418 :         if (type == G_LOCK_UPGRADE) {
     482           3 :                 ssize_t shared_idx = g_lock_find_shared(&lck, &self);
     483             : 
     484           3 :                 if (shared_idx == -1) {
     485           0 :                         DBG_DEBUG("Trying to upgrade %s without "
     486             :                                   "existing shared lock\n",
     487             :                                   server_id_str_buf(self, &tmp));
     488           0 :                         return NT_STATUS_NOT_LOCKED;
     489             :                 }
     490             : 
     491           3 :                 g_lock_del_shared(&lck, shared_idx);
     492           3 :                 type = G_LOCK_WRITE;
     493             :         }
     494             : 
     495         415 :         if (type == G_LOCK_WRITE) {
     496         416 :                 ssize_t shared_idx = g_lock_find_shared(&lck, &self);
     497             : 
     498         416 :                 if (shared_idx != -1) {
     499           0 :                         DBG_DEBUG("Trying to writelock existing shared %s\n",
     500             :                                   server_id_str_buf(self, &tmp));
     501           0 :                         return NT_STATUS_WAS_LOCKED;
     502             :                 }
     503             : 
     504         416 :                 lck.exclusive = self;
     505             : 
     506         416 :                 status = g_lock_store(rec, &lck, NULL, NULL, 0);
     507         416 :                 if (!NT_STATUS_IS_OK(status)) {
     508           0 :                         DBG_DEBUG("g_lock_store() failed: %s\n",
     509             :                                   nt_errstr(status));
     510           0 :                         return status;
     511             :                 }
     512             : 
     513         416 :                 if (lck.num_shared != 0) {
     514           5 :                         talloc_set_destructor(
     515             :                                 req_state, g_lock_lock_state_destructor);
     516             : 
     517          10 :                         g_lock_get_shared(&lck, 0, blocker);
     518             : 
     519           5 :                         DBG_DEBUG("Waiting for %zu shared locks, "
     520             :                                   "picking blocker %s\n",
     521             :                                   lck.num_shared,
     522             :                                   server_id_str_buf(*blocker, &tmp));
     523             : 
     524           5 :                         return NT_STATUS_LOCK_NOT_GRANTED;
     525             :                 }
     526             : 
     527         411 :                 talloc_set_destructor(req_state, NULL);
     528             : 
     529         411 :                 return NT_STATUS_OK;
     530             :         }
     531             : 
     532           2 : do_shared:
     533             : 
     534           2 :         if (lck.num_shared == 0) {
     535           2 :                 status = g_lock_store(rec, &lck, &self, NULL, 0);
     536           2 :                 if (!NT_STATUS_IS_OK(status)) {
     537           0 :                         DBG_DEBUG("g_lock_store() failed: %s\n",
     538             :                                   nt_errstr(status));
     539             :                 }
     540             : 
     541           2 :                 return status;
     542             :         }
     543             : 
     544           0 :         g_lock_cleanup_shared(&lck);
     545             : 
     546           0 :         status = g_lock_store(rec, &lck, &self, NULL, 0);
     547           0 :         if (!NT_STATUS_IS_OK(status)) {
     548           0 :                 DBG_DEBUG("g_lock_store() failed: %s\n",
     549             :                           nt_errstr(status));
     550           0 :                 return status;
     551             :         }
     552             : 
     553           0 :         return NT_STATUS_OK;
     554             : }
     555             : 
     556         570 : static void g_lock_lock_fn(
     557             :         struct db_record *rec,
     558             :         TDB_DATA value,
     559             :         void *private_data)
     560             : {
     561         570 :         struct g_lock_lock_fn_state *state = private_data;
     562         570 :         struct server_id blocker = {0};
     563             : 
     564         570 :         state->status = g_lock_trylock(rec, state, value, &blocker);
     565         570 :         if (!NT_STATUS_IS_OK(state->status)) {
     566         155 :                 DBG_DEBUG("g_lock_trylock returned %s\n",
     567             :                           nt_errstr(state->status));
     568             :         }
     569         570 :         if (!NT_STATUS_EQUAL(state->status, NT_STATUS_LOCK_NOT_GRANTED)) {
     570         417 :                 return;
     571             :         }
     572             : 
     573         297 :         state->watch_req = dbwrap_watched_watch_send(
     574         269 :                 state->req_state, state->req_state->ev, rec, blocker);
     575         153 :         if (state->watch_req == NULL) {
     576           0 :                 state->status = NT_STATUS_NO_MEMORY;
     577             :         }
     578             : }
     579             : 
     580           2 : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s)
     581             : {
     582           2 :         NTSTATUS status = g_lock_unlock(s->ctx, s->key);
     583           2 :         if (!NT_STATUS_IS_OK(status)) {
     584           0 :                 DBG_DEBUG("g_lock_unlock failed: %s\n", nt_errstr(status));
     585             :         }
     586           2 :         return 0;
     587             : }
     588             : 
     589             : static void g_lock_lock_retry(struct tevent_req *subreq);
     590             : 
     591         422 : struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
     592             :                                     struct tevent_context *ev,
     593             :                                     struct g_lock_ctx *ctx,
     594             :                                     TDB_DATA key,
     595             :                                     enum g_lock_type type)
     596             : {
     597             :         struct tevent_req *req;
     598             :         struct g_lock_lock_state *state;
     599             :         struct g_lock_lock_fn_state fn_state;
     600             :         NTSTATUS status;
     601             :         bool ok;
     602             : 
     603         422 :         req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
     604         422 :         if (req == NULL) {
     605           0 :                 return NULL;
     606             :         }
     607         422 :         state->ev = ev;
     608         422 :         state->ctx = ctx;
     609         422 :         state->key = key;
     610         422 :         state->type = type;
     611             : 
     612         422 :         fn_state = (struct g_lock_lock_fn_state) {
     613             :                 .req_state = state,
     614             :         };
     615             : 
     616         422 :         status = dbwrap_do_locked(ctx->db, key, g_lock_lock_fn, &fn_state);
     617         422 :         if (tevent_req_nterror(req, status)) {
     618           0 :                 DBG_DEBUG("dbwrap_do_locked failed: %s\n",
     619             :                           nt_errstr(status));
     620           0 :                 return tevent_req_post(req, ev);
     621             :         }
     622             : 
     623         422 :         if (NT_STATUS_IS_OK(fn_state.status)) {
     624         289 :                 tevent_req_done(req);
     625         289 :                 return tevent_req_post(req, ev);
     626             :         }
     627         133 :         if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
     628           2 :                 tevent_req_nterror(req, fn_state.status);
     629           2 :                 return tevent_req_post(req, ev);
     630             :         }
     631             : 
     632         131 :         if (tevent_req_nomem(fn_state.watch_req, req)) {
     633           0 :                 return tevent_req_post(req, ev);
     634             :         }
     635             : 
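                     :         /*
                     :          * Re-check the lock state even without a wakeup: the watch
                     :          * gets a randomized endtime 5-9 seconds out, presumably to
                     :          * spread retries and to recover if a wakeup message is lost.
                     :          */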
     636         262 :         ok = tevent_req_set_endtime(
     637             :                 fn_state.watch_req,
     638         131 :                 state->ev,
     639         131 :                 timeval_current_ofs(5 + generate_random() % 5, 0));
     640         131 :         if (!ok) {
     641           0 :                 tevent_req_oom(req);
     642           0 :                 return tevent_req_post(req, ev);
     643             :         }
     644         131 :         tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
     645             : 
     646         131 :         return req;
     647             : }
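                     :
                     : /*
                     :  * A sketch of driving the async path (hypothetical caller using
                     :  * the usual tevent send/recv pattern):
                     :  *
                     :  *      static void got_lock(struct tevent_req *subreq)
                     :  *      {
                     :  *              NTSTATUS status = g_lock_lock_recv(subreq);
                     :  *              TALLOC_FREE(subreq);
                     :  *              ... on NT_STATUS_OK the lock is now held ...
                     :  *      }
                     :  *
                     :  *      subreq = g_lock_lock_send(mem_ctx, ev, ctx, key, G_LOCK_READ);
                     :  *      if (subreq == NULL) {
                     :  *              return NT_STATUS_NO_MEMORY;
                     :  *      }
                     :  *      tevent_req_set_callback(subreq, got_lock, NULL);
                     :  */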
     648             : 
     649         148 : static void g_lock_lock_retry(struct tevent_req *subreq)
     650             : {
     651         148 :         struct tevent_req *req = tevent_req_callback_data(
     652             :                 subreq, struct tevent_req);
     653         148 :         struct g_lock_lock_state *state = tevent_req_data(
     654             :                 req, struct g_lock_lock_state);
     655             :         struct g_lock_lock_fn_state fn_state;
     656         148 :         struct server_id blocker = { .pid = 0 };
     657         148 :         bool blockerdead = false;
     658             :         NTSTATUS status;
     659             : 
     660         148 :         status = dbwrap_watched_watch_recv(subreq, &blockerdead, &blocker);
     661         148 :         DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
     662         148 :         TALLOC_FREE(subreq);
     663             : 
     664         148 :         if (!NT_STATUS_IS_OK(status) &&
     665           0 :             !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
     666           0 :                 tevent_req_nterror(req, status);
     667           0 :                 return;
     668             :         }
     669             : 
     670         148 :         state->retry = true;
     671             : 
     672         148 :         fn_state = (struct g_lock_lock_fn_state) {
     673             :                 .req_state = state,
     674         148 :                 .dead_blocker = blockerdead ? &blocker : NULL,
     675             :         };
     676             : 
     677         148 :         status = dbwrap_do_locked(state->ctx->db, state->key,
     678             :                                   g_lock_lock_fn, &fn_state);
     679         148 :         if (tevent_req_nterror(req, status)) {
     680           0 :                 DBG_DEBUG("dbwrap_do_locked failed: %s\n",
     681             :                           nt_errstr(status));
     682           0 :                 return;
     683             :         }
     684             : 
     685         148 :         if (NT_STATUS_IS_OK(fn_state.status)) {
     686         126 :                 tevent_req_done(req);
     687         126 :                 return;
     688             :         }
     689          22 :         if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
     690           0 :                 tevent_req_nterror(req, fn_state.status);
     691           0 :                 return;
     692             :         }
     693             : 
     694          22 :         if (tevent_req_nomem(fn_state.watch_req, req)) {
     695           0 :                 return;
     696             :         }
     697             : 
     698          22 :         if (!tevent_req_set_endtime(
     699             :                     fn_state.watch_req, state->ev,
     700          22 :                     timeval_current_ofs(5 + generate_random() % 5, 0))) {
     701           0 :                 return;
     702             :         }
     703          22 :         tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
     704             : }
     705             : 
     706         421 : NTSTATUS g_lock_lock_recv(struct tevent_req *req)
     707             : {
     708         421 :         struct g_lock_lock_state *state = tevent_req_data(
     709             :                 req, struct g_lock_lock_state);
     710         421 :         struct g_lock_ctx *ctx = state->ctx;
     711             :         NTSTATUS status;
     712             : 
     713         421 :         if (tevent_req_is_nterror(req, &status)) {
     714           6 :                 return status;
     715             :         }
     716             : 
     717         550 :         if ((ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) &&
     718         295 :             ((state->type == G_LOCK_READ) ||
     719         160 :              (state->type == G_LOCK_WRITE))) {
     720         160 :                 const char *name = dbwrap_name(ctx->db);
     721         160 :                 dbwrap_lock_order_lock(name, ctx->lock_order);
     722             :         }
     723             : 
     724         415 :         return NT_STATUS_OK;
     725             : }
     726             : 
     727             : struct g_lock_lock_simple_state {
     728             :         struct server_id me;
     729             :         enum g_lock_type type;
     730             :         NTSTATUS status;
     731             : };
     732             : 
     733     1100229 : static void g_lock_lock_simple_fn(
     734             :         struct db_record *rec,
     735             :         TDB_DATA value,
     736             :         void *private_data)
     737             : {
     738     1100229 :         struct g_lock_lock_simple_state *state = private_data;
     739             :         struct server_id_buf buf;
     740     1100229 :         struct g_lock lck = { .exclusive.pid = 0 };
     741             :         bool ok;
     742             : 
     743     1100229 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
     744     1100229 :         if (!ok) {
     745           0 :                 DBG_DEBUG("g_lock_parse failed\n");
     746           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
     747      960717 :                 return;
     748             :         }
     749             : 
     750     1100229 :         if (lck.exclusive.pid != 0) {
     751         163 :                 DBG_DEBUG("locked by %s\n",
     752             :                           server_id_str_buf(lck.exclusive, &buf));
     753         160 :                 goto not_granted;
     754             :         }
     755             : 
     756     1100066 :         if (state->type == G_LOCK_WRITE) {
     757     1100048 :                 if (lck.num_shared != 0) {
     758           2 :                         DBG_DEBUG("num_shared=%zu\n", lck.num_shared);
     759           0 :                         goto not_granted;
     760             :                 }
     761     1100046 :                 lck.exclusive = state->me;
     762     1100046 :                 state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
     763     1100046 :                 return;
     764             :         }
     765             : 
     766          18 :         if (state->type == G_LOCK_READ) {
     767          18 :                 g_lock_cleanup_shared(&lck);
     768          18 :                 state->status = g_lock_store(rec, &lck, &state->me, NULL, 0);
     769          18 :                 return;
     770             :         }
     771             : 
     772           0 : not_granted:
     773         165 :         state->status = NT_STATUS_LOCK_NOT_GRANTED;
     774             : }
     775             : 
     776     1100232 : NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
     777             :                      enum g_lock_type type, struct timeval timeout)
     778             : {
     779             :         TALLOC_CTX *frame;
     780             :         struct tevent_context *ev;
     781             :         struct tevent_req *req;
     782             :         struct timeval end;
     783             :         NTSTATUS status;
     784             : 
     785     1100232 :         if ((type == G_LOCK_READ) || (type == G_LOCK_WRITE)) {
     786             :                 /*
     787             :                  * This is an abstraction violation: Normally we do
     788             :                  * the sync wrappers around async functions with full
     789             :                  * nested event contexts. However, this is used in
     790             :                  * very hot code paths, so avoid the event context
     791             :                  * creation for the good path where there's no lock
     792             :                  * contention. My benchmark gave a factor of 2
     793             :                  * improvement for lock/unlock.
     794             :                  */
     795     2200458 :                 struct g_lock_lock_simple_state state = {
     796     1100229 :                         .me = messaging_server_id(ctx->msg),
     797             :                         .type = type,
     798             :                 };
     799     1100229 :                 status = dbwrap_do_locked(
     800             :                         ctx->db, key, g_lock_lock_simple_fn, &state);
     801     1100229 :                 if (!NT_STATUS_IS_OK(status)) {
     802           0 :                         DBG_DEBUG("dbwrap_do_locked() failed: %s\n",
     803             :                                   nt_errstr(status));
     804     1100064 :                         return status;
     805             :                 }
     806             : 
     807     1100229 :                 DBG_DEBUG("status=%s, state.status=%s\n",
     808             :                           nt_errstr(status),
     809             :                           nt_errstr(state.status));
     810             : 
     811     1100229 :                 if (NT_STATUS_IS_OK(state.status)) {
     812     1100064 :                         if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
     813     1100035 :                                 const char *name = dbwrap_name(ctx->db);
     814     1100035 :                                 dbwrap_lock_order_lock(name, ctx->lock_order);
     815             :                         }
     816     1100064 :                         return NT_STATUS_OK;
     817             :                 }
     818         165 :                 if (!NT_STATUS_EQUAL(
     819             :                             state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
     820           0 :                         return state.status;
     821             :                 }
     822             : 
     823             :                 /*
     824             :                  * Fall back to the full g_lock_trylock logic,
     825             :                  * g_lock_lock_simple_fn() called above only covers
     826             :                  * the uncontended path.
     827             :                  */
     828             :         }
     829             : 
     830         168 :         frame = talloc_stackframe();
     831         168 :         status = NT_STATUS_NO_MEMORY;
     832             : 
     833         168 :         ev = samba_tevent_context_init(frame);
     834         168 :         if (ev == NULL) {
     835           0 :                 goto fail;
     836             :         }
     837         168 :         req = g_lock_lock_send(frame, ev, ctx, key, type);
     838         168 :         if (req == NULL) {
     839           0 :                 goto fail;
     840             :         }
     841         168 :         end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
     842         168 :         if (!tevent_req_set_endtime(req, ev, end)) {
     843           0 :                 goto fail;
     844             :         }
     845         168 :         if (!tevent_req_poll_ntstatus(req, ev, &status)) {
     846           0 :                 goto fail;
     847             :         }
     848         168 :         status = g_lock_lock_recv(req);
     849         168 :  fail:
     850         168 :         TALLOC_FREE(frame);
     851         168 :         return status;
     852             : }
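                     :
                     : /*
                     :  * A blocking usage sketch (hypothetical caller; error checking
                     :  * trimmed, key name illustrative):
                     :  *
                     :  *      TDB_DATA key = string_term_tdb_data("my-lock-name");
                     :  *      NTSTATUS status = g_lock_lock(ctx, key, G_LOCK_WRITE,
                     :  *                                    timeval_set(10, 0));
                     :  *      if (NT_STATUS_IS_OK(status)) {
                     :  *              ... critical section ...
                     :  *              status = g_lock_unlock(ctx, key);
                     :  *      }
                     :  */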
     853             : 
     854             : struct g_lock_unlock_state {
     855             :         struct server_id self;
     856             :         NTSTATUS status;
     857             : };
     858             : 
     859     1100465 : static void g_lock_unlock_fn(
     860             :         struct db_record *rec,
     861             :         TDB_DATA value,
     862             :         void *private_data)
     863             : {
     864     1100465 :         struct g_lock_unlock_state *state = private_data;
     865             :         struct server_id_buf tmp1, tmp2;
     866             :         struct g_lock lck;
     867             :         size_t i;
     868             :         bool ok, exclusive;
     869             : 
     870     1100465 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
     871     1100465 :         if (!ok) {
     872           0 :                 DBG_DEBUG("g_lock_parse() failed\n");
     873           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
     874      366511 :                 return;
     875             :         }
     876             : 
     877     1100465 :         exclusive = server_id_equal(&state->self, &lck.exclusive);
     878             : 
     879     1100478 :         for (i=0; i<lck.num_shared; i++) {
     880             :                 struct server_id shared;
     881          40 :                 g_lock_get_shared(&lck, i, &shared);
     882          21 :                 if (server_id_equal(&state->self, &shared)) {
     883           2 :                         break;
     884             :                 }
     885             :         }
     886             : 
     887     1100465 :         if (i < lck.num_shared) {
     888           8 :                 if (exclusive) {
     889           0 :                         DBG_DEBUG("%s both exclusive and shared (%zu)\n",
     890             :                                   server_id_str_buf(state->self, &tmp1),
     891             :                                   i);
     892           0 :                         state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
     893           0 :                         return;
     894             :                 }
     895           8 :                 g_lock_del_shared(&lck, i);
     896             :         } else {
     897     1100457 :                 if (!exclusive) {
     898           1 :                         DBG_DEBUG("Lock not found, self=%s, lck.exclusive=%s, "
     899             :                                   "num_rec=%zu\n",
     900             :                                   server_id_str_buf(state->self, &tmp1),
     901             :                                   server_id_str_buf(lck.exclusive, &tmp2),
     902             :                                   lck.num_shared);
     903           1 :                         state->status = NT_STATUS_NOT_FOUND;
     904           1 :                         return;
     905             :                 }
     906     1100456 :                 lck.exclusive = (struct server_id) { .pid = 0 };
     907             :         }
     908             : 
     909     2061511 :         if ((lck.exclusive.pid == 0) &&
     910     2061504 :             (lck.num_shared == 0) &&
     911     1100457 :             (lck.datalen == 0)) {
     912      426949 :                 state->status = dbwrap_record_delete(rec);
     913      426949 :                 return;
     914             :         }
     915             : 
     916      673515 :         state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
     917             : }
     918             : 
     919     1100465 : NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key)
     920             : {
     921     2061513 :         struct g_lock_unlock_state state = {
     922     1100465 :                 .self = messaging_server_id(ctx->msg),
     923             :         };
     924             :         NTSTATUS status;
     925             : 
     926     1100465 :         status = dbwrap_do_locked(ctx->db, key, g_lock_unlock_fn, &state);
     927     1100465 :         if (!NT_STATUS_IS_OK(status)) {
     928           0 :                 DBG_WARNING("dbwrap_do_locked failed: %s\n",
     929             :                             nt_errstr(status));
     930           0 :                 return status;
     931             :         }
     932     1100465 :         if (!NT_STATUS_IS_OK(state.status)) {
     933           1 :                 DBG_WARNING("g_lock_unlock_fn failed: %s\n",
     934             :                             nt_errstr(state.status));
     935           1 :                 return state.status;
     936             :         }
     937             : 
     938     1100464 :         if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
     939     1100195 :                 const char *name = dbwrap_name(ctx->db);
     940     1100195 :                 dbwrap_lock_order_unlock(name, ctx->lock_order);
     941             :         }
     942             : 
     943     1100464 :         return NT_STATUS_OK;
     944             : }
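                     :
                     : /*
                     :  * Note that the unlock must come from the same server_id that took
                     :  * the lock: g_lock_unlock_fn() matches "self" against the exclusive
                     :  * and shared holders and fails with NT_STATUS_NOT_FOUND otherwise.
                     :  * The call also undoes the lock-order bookkeeping taken on lock.
                     :  */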
     945             : 
     946             : struct g_lock_writev_data_state {
     947             :         TDB_DATA key;
     948             :         struct server_id self;
     949             :         const TDB_DATA *dbufs;
     950             :         size_t num_dbufs;
     951             :         NTSTATUS status;
     952             : };
     953             : 
     954     1762264 : static void g_lock_writev_data_fn(
     955             :         struct db_record *rec,
     956             :         TDB_DATA value,
     957             :         void *private_data)
     958             : {
     959     1762264 :         struct g_lock_writev_data_state *state = private_data;
     960             :         struct g_lock lck;
     961             :         bool exclusive;
     962             :         bool ok;
     963             : 
     964     1762264 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
     965     1762264 :         if (!ok) {
     966           0 :                 DBG_DEBUG("g_lock_parse for %s failed\n",
     967             :                           hex_encode_talloc(talloc_tos(),
     968             :                                             state->key.dptr,
     969             :                                             state->key.dsize));
     970           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
     971           1 :                 return;
     972             :         }
     973             : 
     974     1762264 :         exclusive = server_id_equal(&state->self, &lck.exclusive);
     975             : 
     976             :         /*
     977             :          * Make sure we're really exclusive: the exclusive slot is set
     978             :          * while still waiting for shared holders, so require num_shared == 0.
     979             :          */
     980     1762264 :         exclusive &= (lck.num_shared == 0);
     981             : 
     982     1762264 :         if (!exclusive) {
     983             :                 struct server_id_buf buf1, buf2;
     984           1 :                 DBG_DEBUG("Not locked by us: self=%s, lck.exclusive=%s, "
     985             :                           "lck.num_shared=%zu\n",
     986             :                           server_id_str_buf(state->self, &buf1),
     987             :                           server_id_str_buf(lck.exclusive, &buf2),
     988             :                           lck.num_shared);
     989           1 :                 state->status = NT_STATUS_NOT_LOCKED;
     990           0 :                 return;
     991             :         }
     992             : 
     993     1762263 :         lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
     994     1762263 :         lck.data = NULL;
     995     1762263 :         lck.datalen = 0;
     996     1762263 :         state->status = g_lock_store(
     997             :                 rec, &lck, NULL, state->dbufs, state->num_dbufs);
     998             : }
     999             : 
    1000     1762264 : NTSTATUS g_lock_writev_data(
    1001             :         struct g_lock_ctx *ctx,
    1002             :         TDB_DATA key,
    1003             :         const TDB_DATA *dbufs,
    1004             :         size_t num_dbufs)
    1005             : {
    1006     3524528 :         struct g_lock_writev_data_state state = {
    1007             :                 .key = key,
    1008     1762264 :                 .self = messaging_server_id(ctx->msg),
    1009             :                 .dbufs = dbufs,
    1010             :                 .num_dbufs = num_dbufs,
    1011             :         };
    1012             :         NTSTATUS status;
    1013             : 
    1014     1762264 :         status = dbwrap_do_locked(
    1015             :                 ctx->db, key, g_lock_writev_data_fn, &state);
    1016     1762264 :         if (!NT_STATUS_IS_OK(status)) {
    1017           0 :                 DBG_WARNING("dbwrap_do_locked failed: %s\n",
    1018             :                             nt_errstr(status));
    1019           0 :                 return status;
    1020             :         }
    1021     1762264 :         if (!NT_STATUS_IS_OK(state.status)) {
    1022           1 :                 DBG_WARNING("g_lock_writev_data_fn failed: %s\n",
    1023             :                             nt_errstr(state.status));
    1024           1 :                 return state.status;
    1025             :         }
    1026             : 
    1027     1762263 :         return NT_STATUS_OK;
    1028             : }
    1029             : 
    1030      425592 : NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
    1031             :                            const uint8_t *buf, size_t buflen)
    1032             : {
    1033      425592 :         TDB_DATA dbuf = {
    1034             :                 .dptr = discard_const_p(uint8_t, buf),
    1035             :                 .dsize = buflen,
    1036             :         };
    1037      425592 :         return g_lock_writev_data(ctx, key, &dbuf, 1);
    1038             : }
    1039             : 
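A minimal usage sketch (illustrative, not instrumented source): both
writers require that this process already holds the exclusive lock on
"key", otherwise g_lock_writev_data_fn() above fails with
NT_STATUS_NOT_LOCKED. "ctx" is assumed to come from g_lock_ctx_init().

        static NTSTATUS write_example(struct g_lock_ctx *ctx, TDB_DATA key)
        {
                const char *hdr = "hdr";
                const char *payload = "payload";
                TDB_DATA dbufs[2] = {
                        { .dptr = discard_const_p(uint8_t, hdr),
                          .dsize = strlen(hdr) },
                        { .dptr = discard_const_p(uint8_t, payload),
                          .dsize = strlen(payload) },
                };

                /* Both buffers become the record's data in one locked
                 * operation; g_lock_write_data() is just the
                 * single-buffer special case of this call. */
                return g_lock_writev_data(ctx, key, dbufs, ARRAY_SIZE(dbufs));
        }
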
    1040             : struct g_lock_locks_state {
    1041             :         int (*fn)(TDB_DATA key, void *private_data);
    1042             :         void *private_data;
    1043             : };
    1044             : 
    1045       17857 : static int g_lock_locks_fn(struct db_record *rec, void *priv)
    1046             : {
    1047             :         TDB_DATA key;
    1048       17857 :         struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;
    1049             : 
    1050       17857 :         key = dbwrap_record_get_key(rec);
    1051       17857 :         return state->fn(key, state->private_data);
    1052             : }
    1053             : 
    1054        5944 : int g_lock_locks(struct g_lock_ctx *ctx,
    1055             :                  int (*fn)(TDB_DATA key, void *private_data),
    1056             :                  void *private_data)
    1057             : {
    1058             :         struct g_lock_locks_state state;
    1059             :         NTSTATUS status;
    1060             :         int count;
    1061             : 
    1062        5944 :         state.fn = fn;
    1063        5944 :         state.private_data = private_data;
    1064             : 
    1065        5944 :         status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
    1066        5944 :         if (!NT_STATUS_IS_OK(status)) {
    1067           0 :                 return -1;
    1068             :         }
    1069        5944 :         return count;
    1070             : }
    1071             : 
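A minimal sketch of a read-only traversal, assuming an initialised
"ctx". The callback's return value follows dbwrap_traverse_read()
semantics: returning nonzero stops the walk early.

        static int count_key_fn(TDB_DATA key, void *private_data)
        {
                size_t *count = private_data;
                (*count)++;
                return 0;               /* keep traversing */
        }

        static void count_locks(struct g_lock_ctx *ctx)
        {
                size_t count = 0;
                int n = g_lock_locks(ctx, count_key_fn, &count);
                if (n == -1) {
                        DBG_DEBUG("traverse failed\n");
                        return;
                }
                DBG_DEBUG("%zu lock records\n", count);
        }
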
    1072             : struct g_lock_dump_state {
    1073             :         TALLOC_CTX *mem_ctx;
    1074             :         TDB_DATA key;
    1075             :         void (*fn)(struct server_id exclusive,
    1076             :                    size_t num_shared,
    1077             :                    struct server_id *shared,
    1078             :                    const uint8_t *data,
    1079             :                    size_t datalen,
    1080             :                    void *private_data);
    1081             :         void *private_data;
    1082             :         NTSTATUS status;
    1083             :         enum dbwrap_req_state req_state;
    1084             : };
    1085             : 
    1086     4511626 : static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
    1087             :                            void *private_data)
    1088             : {
    1089     4511626 :         struct g_lock_dump_state *state = private_data;
    1090     4511626 :         struct g_lock lck = (struct g_lock) { .exclusive.pid = 0 };
    1091     4511626 :         struct server_id *shared = NULL;
    1092             :         size_t i;
    1093             :         bool ok;
    1094             : 
    1095     4511626 :         ok = g_lock_parse(data.dptr, data.dsize, &lck);
    1096     4511626 :         if (!ok) {
    1097           0 :                 DBG_DEBUG("g_lock_parse failed for %s\n",
    1098             :                           hex_encode_talloc(talloc_tos(),
    1099             :                                             state->key.dptr,
    1100             :                                             state->key.dsize));
    1101           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1102           0 :                 return;
    1103             :         }
    1104             : 
    1105     4511626 :         shared = talloc_array(
    1106             :                 state->mem_ctx, struct server_id, lck.num_shared);
    1107     4511626 :         if (shared == NULL) {
    1108           0 :                 DBG_DEBUG("talloc failed\n");
    1109           0 :                 state->status = NT_STATUS_NO_MEMORY;
    1110           0 :                 return;
    1111             :         }
    1112             : 
    1113     4503790 :         for (i=0; i<lck.num_shared; i++) {
    1114          36 :                 g_lock_get_shared(&lck, i, &shared[i]);
    1115             :         }
    1116             : 
    1117     8399312 :         state->fn(lck.exclusive,
    1118             :                   lck.num_shared,
    1119             :                   shared,
    1120     4511626 :                   lck.data,
    1121             :                   lck.datalen,
    1122             :                   state->private_data);
    1123             : 
    1124     4511626 :         TALLOC_FREE(shared);
    1125             : 
    1126     4511626 :         state->status = NT_STATUS_OK;
    1127             : }
    1128             : 
    1129     4933934 : NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, TDB_DATA key,
    1130             :                      void (*fn)(struct server_id exclusive,
    1131             :                                 size_t num_shared,
    1132             :                                 struct server_id *shared,
    1133             :                                 const uint8_t *data,
    1134             :                                 size_t datalen,
    1135             :                                 void *private_data),
    1136             :                      void *private_data)
    1137             : {
    1138     4933934 :         struct g_lock_dump_state state = {
    1139             :                 .mem_ctx = ctx, .key = key,
    1140             :                 .fn = fn, .private_data = private_data
    1141             :         };
    1142             :         NTSTATUS status;
    1143             : 
    1144     4933934 :         status = dbwrap_parse_record(ctx->db, key, g_lock_dump_fn, &state);
    1145     4933934 :         if (!NT_STATUS_IS_OK(status)) {
    1146      422308 :                 DBG_DEBUG("dbwrap_parse_record returned %s\n",
    1147             :                           nt_errstr(status));
    1148      422308 :                 return status;
    1149             :         }
    1150     4511626 :         if (!NT_STATUS_IS_OK(state.status)) {
    1151           0 :                 DBG_DEBUG("g_lock_dump_fn returned %s\n",
    1152             :                           nt_errstr(state.status));
    1153           0 :                 return state.status;
    1154             :         }
    1155     4511626 :         return NT_STATUS_OK;
    1156             : }
    1157             : 
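A sketch of inspecting one key's holders and payload, assuming an
initialised "ctx"; string_term_tdb_data() (from util_tdb.h, included
at the top of this file) builds the key from a NUL-terminated string.
The callback runs from inside dbwrap_parse_record(), so it should only
look at the passed-in values, not re-enter the database.

        static void print_holders_fn(struct server_id exclusive,
                                     size_t num_shared,
                                     struct server_id *shared,
                                     const uint8_t *data,
                                     size_t datalen,
                                     void *private_data)
        {
                struct server_id_buf buf;
                DBG_DEBUG("exclusive=%s num_shared=%zu datalen=%zu\n",
                          server_id_str_buf(exclusive, &buf),
                          num_shared,
                          datalen);
        }

        static void print_holders(struct g_lock_ctx *ctx, const char *name)
        {
                NTSTATUS status = g_lock_dump(
                        ctx, string_term_tdb_data(name),
                        print_holders_fn, NULL);
                if (!NT_STATUS_IS_OK(status)) {
                        DBG_DEBUG("g_lock_dump returned %s\n",
                                  nt_errstr(status));
                }
        }
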
    1158             : static void g_lock_dump_done(struct tevent_req *subreq);
    1159             : 
    1160           0 : struct tevent_req *g_lock_dump_send(
    1161             :         TALLOC_CTX *mem_ctx,
    1162             :         struct tevent_context *ev,
    1163             :         struct g_lock_ctx *ctx,
    1164             :         TDB_DATA key,
    1165             :         void (*fn)(struct server_id exclusive,
    1166             :                    size_t num_shared,
    1167             :                    struct server_id *shared,
    1168             :                    const uint8_t *data,
    1169             :                    size_t datalen,
    1170             :                    void *private_data),
    1171             :         void *private_data)
    1172             : {
    1173           0 :         struct tevent_req *req = NULL, *subreq = NULL;
    1174           0 :         struct g_lock_dump_state *state = NULL;
    1175             : 
    1176           0 :         req = tevent_req_create(mem_ctx, &state, struct g_lock_dump_state);
    1177           0 :         if (req == NULL) {
    1178           0 :                 return NULL;
    1179             :         }
    1180           0 :         state->mem_ctx = state;
    1181           0 :         state->key = key;
    1182           0 :         state->fn = fn;
    1183           0 :         state->private_data = private_data;
    1184             : 
    1185           0 :         subreq = dbwrap_parse_record_send(
    1186             :                 state,
    1187             :                 ev,
    1188             :                 ctx->db,
    1189             :                 key,
    1190             :                 g_lock_dump_fn,
    1191             :                 state,
    1192           0 :                 &state->req_state);
    1193           0 :         if (tevent_req_nomem(subreq, req)) {
    1194           0 :                 return tevent_req_post(req, ev);
    1195             :         }
    1196           0 :         tevent_req_set_callback(subreq, g_lock_dump_done, req);
    1197           0 :         return req;
    1198             : }
    1199             : 
    1200           0 : static void g_lock_dump_done(struct tevent_req *subreq)
    1201             : {
    1202           0 :         struct tevent_req *req = tevent_req_callback_data(
    1203             :                 subreq, struct tevent_req);
    1204           0 :         struct g_lock_dump_state *state = tevent_req_data(
    1205             :                 req, struct g_lock_dump_state);
    1206             :         NTSTATUS status;
    1207             : 
    1208           0 :         status = dbwrap_parse_record_recv(subreq);
    1209           0 :         TALLOC_FREE(subreq);
    1210           0 :         if (tevent_req_nterror(req, status) ||
    1211           0 :             tevent_req_nterror(req, state->status)) {
    1212           0 :                 return;
    1213             :         }
    1214           0 :         tevent_req_done(req);
    1215             : }
    1216             : 
    1217           0 : NTSTATUS g_lock_dump_recv(struct tevent_req *req)
    1218             : {
    1219           0 :         return tevent_req_simple_recv_ntstatus(req);
    1220             : }
    1221             : 
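The async variant can be driven to completion from code that owns a
private event loop; a sketch, reusing print_holders_fn from the
previous sketch and tevent_req_poll_ntstatus() from
lib/util/tevent_ntstatus.h (included above):

        static NTSTATUS dump_sync(struct tevent_context *ev,
                                  struct g_lock_ctx *ctx,
                                  TDB_DATA key)
        {
                struct tevent_req *req = NULL;
                NTSTATUS status = NT_STATUS_NO_MEMORY;

                req = g_lock_dump_send(ev, ev, ctx, key,
                                       print_holders_fn, NULL);
                if (req == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }
                /* Loop the event context until the request is done. */
                if (!tevent_req_poll_ntstatus(req, ev, &status)) {
                        TALLOC_FREE(req);
                        return status;
                }
                status = g_lock_dump_recv(req);
                TALLOC_FREE(req);
                return status;
        }
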
    1222      175807 : int g_lock_seqnum(struct g_lock_ctx *ctx)
    1223             : {
    1224      175807 :         return dbwrap_get_seqnum(ctx->db);
    1225             : }
    1226             : 
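The sequence number increments on every database modification, so a
caller can cache state parsed via g_lock_dump() and revalidate it
cheaply. A sketch; the static-local counter is purely illustrative:

        static bool cache_is_fresh(struct g_lock_ctx *ctx)
        {
                static int cached_seqnum = -1;
                int seqnum = g_lock_seqnum(ctx);

                if (seqnum == cached_seqnum) {
                        return true;    /* no writes since last look */
                }
                cached_seqnum = seqnum;
                return false;           /* caller must re-read */
        }
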
    1227             : struct g_lock_watch_data_state {
    1228             :         struct tevent_context *ev;
    1229             :         struct g_lock_ctx *ctx;
    1230             :         TDB_DATA key;
    1231             :         struct server_id blocker;
    1232             :         bool blockerdead;
    1233             :         uint64_t unique_data_epoch;
    1234             :         NTSTATUS status;
    1235             : };
    1236             : 
    1237             : static void g_lock_watch_data_done(struct tevent_req *subreq);
    1238             : 
    1239        1224 : static void g_lock_watch_data_send_fn(
    1240             :         struct db_record *rec,
    1241             :         TDB_DATA value,
    1242             :         void *private_data)
    1243             : {
    1244        1224 :         struct tevent_req *req = talloc_get_type_abort(
    1245             :                 private_data, struct tevent_req);
    1246        1224 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1247             :                 req, struct g_lock_watch_data_state);
    1248        1224 :         struct tevent_req *subreq = NULL;
    1249             :         struct g_lock lck;
    1250             :         bool ok;
    1251             : 
    1252        1224 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1253        1224 :         if (!ok) {
    1254           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1255           0 :                 return;
    1256             :         }
    1257        1224 :         state->unique_data_epoch = lck.unique_data_epoch;
    1258             : 
    1259        1224 :         DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch);
    1260             : 
    1261        1224 :         subreq = dbwrap_watched_watch_send(
    1262             :                 state, state->ev, rec, state->blocker);
    1263        1224 :         if (subreq == NULL) {
    1264           0 :                 state->status = NT_STATUS_NO_MEMORY;
    1265           0 :                 return;
    1266             :         }
    1267        1224 :         tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
    1268             : 
    1269        1224 :         state->status = NT_STATUS_EVENT_PENDING;
    1270             : }
    1271             : 
    1272        1224 : struct tevent_req *g_lock_watch_data_send(
    1273             :         TALLOC_CTX *mem_ctx,
    1274             :         struct tevent_context *ev,
    1275             :         struct g_lock_ctx *ctx,
    1276             :         TDB_DATA key,
    1277             :         struct server_id blocker)
    1278             : {
    1279        1224 :         struct tevent_req *req = NULL;
    1280        1224 :         struct g_lock_watch_data_state *state = NULL;
    1281             :         NTSTATUS status;
    1282             : 
    1283        1224 :         req = tevent_req_create(
    1284             :                 mem_ctx, &state, struct g_lock_watch_data_state);
    1285        1224 :         if (req == NULL) {
    1286           0 :                 return NULL;
    1287             :         }
    1288        1224 :         state->ev = ev;
    1289        1224 :         state->ctx = ctx;
    1290        1224 :         state->blocker = blocker;
    1291             : 
    1292        1224 :         state->key = tdb_data_talloc_copy(state, key);
    1293        1224 :         if (tevent_req_nomem(state->key.dptr, req)) {
    1294           0 :                 return tevent_req_post(req, ev);
    1295             :         }
    1296             : 
    1297        1224 :         status = dbwrap_do_locked(
    1298             :                 ctx->db, key, g_lock_watch_data_send_fn, req);
    1299        1224 :         if (tevent_req_nterror(req, status)) {
    1300           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
    1301           0 :                 return tevent_req_post(req, ev);
    1302             :         }
    1303             : 
    1304        1224 :         if (NT_STATUS_IS_OK(state->status)) {
    1305           0 :                 tevent_req_done(req);
    1306           0 :                 return tevent_req_post(req, ev);
    1307             :         }
    1308             : 
    1309        1221 :         return req;
    1310             : }
    1311             : 
    1312        2044 : static void g_lock_watch_data_done_fn(
    1313             :         struct db_record *rec,
    1314             :         TDB_DATA value,
    1315             :         void *private_data)
    1316             : {
    1317        2044 :         struct tevent_req *req = talloc_get_type_abort(
    1318             :                 private_data, struct tevent_req);
    1319        2044 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1320             :                 req, struct g_lock_watch_data_state);
    1321        2044 :         struct tevent_req *subreq = NULL;
    1322             :         struct g_lock lck;
    1323             :         bool ok;
    1324             : 
    1325        2044 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1326        2044 :         if (!ok) {
    1327           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1328         585 :                 return;
    1329             :         }
    1330             : 
    1331        2044 :         if (lck.unique_data_epoch != state->unique_data_epoch) {
    1332         610 :                 DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
    1333             :                           "state->unique_data_epoch=%"PRIu64"\n",
    1334             :                           lck.unique_data_epoch,
    1335             :                           state->unique_data_epoch);
    1336         610 :                 state->status = NT_STATUS_OK;
    1337         610 :                 return;
    1338             :         }
    1339             : 
    1340        1434 :         subreq = dbwrap_watched_watch_send(
    1341             :                 state, state->ev, rec, state->blocker);
    1342        1434 :         if (subreq == NULL) {
    1343           0 :                 state->status = NT_STATUS_NO_MEMORY;
    1344           0 :                 return;
    1345             :         }
    1346        1434 :         tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
    1347             : 
    1348        1434 :         state->status = NT_STATUS_EVENT_PENDING;
    1349             : }
    1350             : 
    1351        2044 : static void g_lock_watch_data_done(struct tevent_req *subreq)
    1352             : {
    1353        2044 :         struct tevent_req *req = tevent_req_callback_data(
    1354             :                 subreq, struct tevent_req);
    1355        2044 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1356             :                 req, struct g_lock_watch_data_state);
    1357             :         NTSTATUS status;
    1358             : 
    1359        2044 :         status = dbwrap_watched_watch_recv(
    1360             :                 subreq, &state->blockerdead, &state->blocker);
    1361        2044 :         TALLOC_FREE(subreq);
    1362        2044 :         if (tevent_req_nterror(req, status)) {
    1363           0 :                 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
    1364             :                           nt_errstr(status));
    1365        1434 :                 return;
    1366             :         }
    1367             : 
    1368        2044 :         status = dbwrap_do_locked(
    1369        2044 :                 state->ctx->db, state->key, g_lock_watch_data_done_fn, req);
    1370        2044 :         if (tevent_req_nterror(req, status)) {
    1371           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
    1372           0 :                 return;
    1373             :         }
    1374        2044 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
    1375        1432 :                 return;
    1376             :         }
    1377         610 :         if (tevent_req_nterror(req, state->status)) {
    1378           0 :                 return;
    1379             :         }
    1380         610 :         tevent_req_done(req);
    1381             : }
    1382             : 
    1383         609 : NTSTATUS g_lock_watch_data_recv(
    1384             :         struct tevent_req *req,
    1385             :         bool *blockerdead,
    1386             :         struct server_id *blocker)
    1387             : {
    1388         609 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1389             :                 req, struct g_lock_watch_data_state);
    1390             :         NTSTATUS status;
    1391             : 
    1392         609 :         if (tevent_req_is_nterror(req, &status)) {
    1393           0 :                 return status;
    1394             :         }
    1395         609 :         if (blockerdead != NULL) {
    1396         609 :                 *blockerdead = state->blockerdead;
    1397             :         }
    1398         609 :         if (blocker != NULL) {
    1399         609 :                 *blocker = state->blocker;
    1400             :         }
    1401             : 
    1402         609 :         return NT_STATUS_OK;
    1403             : }
    1404             : 
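A sketch that drives the watch synchronously for illustration; real
callers chain g_lock_watch_data_send() from an existing tevent flow.
The request completes once unique_data_epoch changes (new data was
stored, or g_lock_wake_watchers() ran) or once "blocker" dies:

        static NTSTATUS wait_for_change(struct tevent_context *ev,
                                        struct g_lock_ctx *ctx,
                                        TDB_DATA key,
                                        struct server_id blocker)
        {
                struct tevent_req *req = NULL;
                NTSTATUS status = NT_STATUS_NO_MEMORY;
                bool blockerdead = false;

                req = g_lock_watch_data_send(ev, ev, ctx, key, blocker);
                if (req == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }
                if (!tevent_req_poll_ntstatus(req, ev, &status)) {
                        TALLOC_FREE(req);
                        return status;
                }
                status = g_lock_watch_data_recv(req, &blockerdead, &blocker);
                TALLOC_FREE(req);

                if (NT_STATUS_IS_OK(status) && blockerdead) {
                        /* The watched process exited; callers usually
                         * re-read the data and clean up after it. */
                        DBG_DEBUG("blocker died\n");
                }
                return status;
        }
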
    1405        4723 : static void g_lock_wake_watchers_fn(
    1406             :         struct db_record *rec,
    1407             :         TDB_DATA value,
    1408             :         void *private_data)
    1409             : {
    1410        4723 :         struct g_lock lck = { .exclusive.pid = 0 };
    1411             :         NTSTATUS status;
    1412             :         bool ok;
    1413             : 
    1414        4723 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1415        4723 :         if (!ok) {
    1416           0 :                 DBG_WARNING("g_lock_parse failed\n");
    1417           0 :                 return;
    1418             :         }
    1419             : 
    1420        4723 :         lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
    1421             : 
    1422        4723 :         status = g_lock_store(rec, &lck, NULL, NULL, 0);
    1423        4723 :         if (!NT_STATUS_IS_OK(status)) {
    1424           0 :                 DBG_WARNING("g_lock_store failed: %s\n", nt_errstr(status));
    1425           0 :                 return;
    1426             :         }
    1427             : }
    1428             : 
    1429        4723 : void g_lock_wake_watchers(struct g_lock_ctx *ctx, TDB_DATA key)
    1430             : {
    1431             :         NTSTATUS status;
    1432             : 
    1433        4723 :         status = dbwrap_do_locked(ctx->db, key, g_lock_wake_watchers_fn, NULL);
    1434        4723 :         if (!NT_STATUS_IS_OK(status)) {
    1435           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n",
    1436             :                           nt_errstr(status));
    1437             :         }
    1438        4723 : }
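
Because only unique_data_epoch is bumped, pending g_lock_watch_data
requests complete even though the stored payload is unchanged. A
one-line sketch with a hypothetical helper name:

        static void nudge_watchers(struct g_lock_ctx *ctx, const char *name)
        {
                g_lock_wake_watchers(ctx, string_term_tdb_data(name));
        }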

Generated by: LCOV version 1.13