Line data Source code
1 : /*
2 : Unix SMB/CIFS implementation.
3 : Blocking Locking functions
4 : Copyright (C) Jeremy Allison 1998-2003
5 :
6 : This program is free software; you can redistribute it and/or modify
7 : it under the terms of the GNU General Public License as published by
8 : the Free Software Foundation; either version 3 of the License, or
9 : (at your option) any later version.
10 :
11 : This program is distributed in the hope that it will be useful,
12 : but WITHOUT ANY WARRANTY; without even the implied warranty of
13 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 : GNU General Public License for more details.
15 :
16 : You should have received a copy of the GNU General Public License
17 : along with this program. If not, see <http://www.gnu.org/licenses/>.
18 : */
19 :
20 : #include "includes.h"
21 : #include "locking/share_mode_lock.h"
22 : #include "smbd/smbd.h"
23 : #include "smbd/globals.h"
24 : #include "messages.h"
25 : #include "lib/util/tevent_ntstatus.h"
26 : #include "lib/dbwrap/dbwrap_watch.h"
27 : #include "librpc/gen_ndr/ndr_open_files.h"
28 :
29 : #undef DBGC_CLASS
30 : #define DBGC_CLASS DBGC_LOCKING
31 :
32 5803 : NTSTATUS smbd_do_locks_try(
33 : struct files_struct *fsp,
34 : enum brl_flavour lock_flav,
35 : uint16_t num_locks,
36 : struct smbd_lock_element *locks,
37 : uint16_t *blocker_idx,
38 : struct server_id *blocking_pid,
39 : uint64_t *blocking_smblctx)
40 : {
41 5803 : NTSTATUS status = NT_STATUS_OK;
42 : uint16_t i;
43 :
44 8512 : for (i=0; i<num_locks; i++) {
45 5869 : struct smbd_lock_element *e = &locks[i];
46 :
47 11676 : status = do_lock(
48 : fsp,
49 : locks, /* req_mem_ctx */
50 5869 : &e->req_guid,
51 : e->smblctx,
52 : e->count,
53 : e->offset,
54 : e->brltype,
55 : lock_flav,
56 : blocking_pid,
57 : blocking_smblctx);
58 5869 : if (!NT_STATUS_IS_OK(status)) {
59 3152 : break;
60 : }
61 : }
62 :
63 5803 : if (NT_STATUS_IS_OK(status)) {
64 2643 : return NT_STATUS_OK;
65 : }
66 :
67 3160 : *blocker_idx = i;
68 :
69 : /*
70 : * Undo the locks we successfully got
71 : */
72 3192 : for (i = i-1; i != UINT16_MAX; i--) {
73 32 : struct smbd_lock_element *e = &locks[i];
74 32 : do_unlock(fsp,
75 : e->smblctx,
76 : e->count,
77 : e->offset,
78 : lock_flav);
79 : }
80 :
81 3160 : return status;
82 : }
83 :
84 492 : static bool smbd_smb1_fsp_add_blocked_lock_req(
85 : struct files_struct *fsp, struct tevent_req *req)
86 : {
87 492 : size_t num_reqs = talloc_array_length(fsp->blocked_smb1_lock_reqs);
88 492 : struct tevent_req **tmp = NULL;
89 :
90 492 : tmp = talloc_realloc(
91 : fsp,
92 : fsp->blocked_smb1_lock_reqs,
93 : struct tevent_req *,
94 : num_reqs+1);
95 492 : if (tmp == NULL) {
96 0 : return false;
97 : }
98 492 : fsp->blocked_smb1_lock_reqs = tmp;
99 492 : fsp->blocked_smb1_lock_reqs[num_reqs] = req;
100 492 : return true;
101 : }
102 :
/*
 * Per-request state for an SMB1 blocking byte-range lock request.
 */
struct smbd_smb1_do_locks_state {
	struct tevent_context *ev;	/* event context the request runs on */
	struct smb_request *smbreq;	/* owned; talloc_move()d from the caller */
	struct files_struct *fsp;	/* file the byte-range locks apply to */
	uint32_t timeout;	/* client timeout in msecs; 0 and UINT32_MAX handled specially */
	uint32_t polling_msecs;	/* growing poll interval while blocked on a posix lock */
	uint32_t retry_msecs;	/* growing retry interval while the backend says NT_STATUS_RETRY */
	struct timeval endtime;	/* absolute deadline derived from smbreq->request_time + timeout */
	bool large_offset; /* required for correct cancel */
	enum brl_flavour lock_flav;	/* WINDOWS_LOCK or POSIX_LOCK semantics */
	uint16_t num_locks;	/* number of elements in "locks" */
	struct smbd_lock_element *locks;	/* caller-owned array of requested ranges */
	uint16_t blocker;	/* index into "locks" of the element that blocked us */
	NTSTATUS deny_status;	/* status to return when the request is finally denied */
};
118 :
119 : static void smbd_smb1_do_locks_try(struct tevent_req *req);
120 : static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
121 : static void smbd_smb1_blocked_locks_cleanup(
122 : struct tevent_req *req, enum tevent_req_state req_state);
123 : static NTSTATUS smbd_smb1_do_locks_check(
124 : struct files_struct *fsp,
125 : enum brl_flavour lock_flav,
126 : uint16_t num_locks,
127 : struct smbd_lock_element *locks,
128 : uint16_t *blocker_idx,
129 : struct server_id *blocking_pid,
130 : uint64_t *blocking_smblctx);
131 :
132 2740 : static void smbd_smb1_do_locks_setup_timeout(
133 : struct smbd_smb1_do_locks_state *state,
134 : const struct smbd_lock_element *blocker)
135 : {
136 2740 : struct files_struct *fsp = state->fsp;
137 :
138 2740 : if (!timeval_is_zero(&state->endtime)) {
139 : /*
140 : * already done
141 : */
142 486 : return;
143 : }
144 :
145 2252 : if ((state->timeout != 0) && (state->timeout != UINT32_MAX)) {
146 : /*
147 : * Windows internal resolution for blocking locks
148 : * seems to be about 200ms... Don't wait for less than
149 : * that. JRA.
150 : */
151 214 : state->timeout = MAX(state->timeout, lp_lock_spin_time());
152 : }
153 :
154 2252 : if (state->timeout != 0) {
155 228 : goto set_endtime;
156 : }
157 :
158 2024 : if (blocker == NULL) {
159 0 : goto set_endtime;
160 : }
161 :
162 2152 : if ((blocker->offset >= 0xEF000000) &&
163 128 : ((blocker->offset >> 63) == 0)) {
164 : /*
165 : * This must be an optimization of an ancient
166 : * application bug...
167 : */
168 126 : state->timeout = lp_lock_spin_time();
169 : }
170 :
171 3914 : if (fsp->fsp_flags.lock_failure_seen &&
172 1895 : (blocker->offset == fsp->lock_failure_offset)) {
173 : /*
174 : * Delay repeated lock attempts on the same
175 : * lock. Maybe a more advanced version of the
176 : * above check?
177 : */
178 134 : DBG_DEBUG("Delaying lock request due to previous "
179 : "failure\n");
180 134 : state->timeout = lp_lock_spin_time();
181 : }
182 :
183 4124 : set_endtime:
184 : /*
185 : * Note state->timeout might still 0,
186 : * but that's ok, as we don't want to retry
187 : * in that case.
188 : */
189 4480 : state->endtime = timeval_add(&state->smbreq->request_time,
190 2246 : state->timeout / 1000,
191 2252 : (state->timeout % 1000) * 1000);
192 : }
193 :
194 10 : static void smbd_smb1_do_locks_update_retry_msecs(
195 : struct smbd_smb1_do_locks_state *state)
196 : {
197 : /*
198 : * The default lp_lock_spin_time() is 200ms,
199 : * we just use half of it to trigger the first retry.
200 : *
201 : * v_min is in the range of 0.001 to 10 secs
202 : * (0.1 secs by default)
203 : *
204 : * v_max is in the range of 0.01 to 100 secs
205 : * (1.0 secs by default)
206 : *
207 : * The typical steps are:
208 : * 0.1, 0.2, 0.3, 0.4, ... 1.0
209 : */
210 10 : uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
211 10 : uint32_t v_max = 10 * v_min;
212 :
213 10 : if (state->retry_msecs >= v_max) {
214 0 : state->retry_msecs = v_max;
215 0 : return;
216 : }
217 :
218 10 : state->retry_msecs += v_min;
219 : }
220 :
221 82 : static void smbd_smb1_do_locks_update_polling_msecs(
222 : struct smbd_smb1_do_locks_state *state)
223 : {
224 : /*
225 : * The default lp_lock_spin_time() is 200ms.
226 : *
227 : * v_min is in the range of 0.002 to 20 secs
228 : * (0.2 secs by default)
229 : *
230 : * v_max is in the range of 0.02 to 200 secs
231 : * (2.0 secs by default)
232 : *
233 : * The typical steps are:
234 : * 0.2, 0.4, 0.6, 0.8, ... 2.0
235 : */
236 82 : uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
237 82 : uint32_t v_max = 10 * v_min;
238 :
239 82 : if (state->polling_msecs >= v_max) {
240 0 : state->polling_msecs = v_max;
241 0 : return;
242 : }
243 :
244 82 : state->polling_msecs += v_min;
245 : }
246 :
247 5667 : struct tevent_req *smbd_smb1_do_locks_send(
248 : TALLOC_CTX *mem_ctx,
249 : struct tevent_context *ev,
250 : struct smb_request **smbreq, /* talloc_move()d into our state */
251 : struct files_struct *fsp,
252 : uint32_t lock_timeout,
253 : bool large_offset,
254 : enum brl_flavour lock_flav,
255 : uint16_t num_locks,
256 : struct smbd_lock_element *locks)
257 : {
258 5667 : struct tevent_req *req = NULL;
259 5667 : struct smbd_smb1_do_locks_state *state = NULL;
260 : bool ok;
261 :
262 5667 : req = tevent_req_create(
263 : mem_ctx, &state, struct smbd_smb1_do_locks_state);
264 5667 : if (req == NULL) {
265 0 : return NULL;
266 : }
267 5667 : state->ev = ev;
268 5667 : state->smbreq = talloc_move(state, smbreq);
269 5667 : state->fsp = fsp;
270 5667 : state->timeout = lock_timeout;
271 5667 : state->large_offset = large_offset;
272 5667 : state->lock_flav = lock_flav;
273 5667 : state->num_locks = num_locks;
274 5667 : state->locks = locks;
275 :
276 5667 : if (lock_flav == POSIX_LOCK) {
277 : /*
278 : * SMB1 posix locks always use
279 : * NT_STATUS_FILE_LOCK_CONFLICT.
280 : */
281 28 : state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;
282 : } else {
283 5639 : state->deny_status = NT_STATUS_LOCK_NOT_GRANTED;
284 : }
285 :
286 5667 : DBG_DEBUG("state=%p, state->smbreq=%p\n", state, state->smbreq);
287 :
288 5667 : if (num_locks == 0) {
289 1499 : DBG_DEBUG("no locks\n");
290 1499 : tevent_req_done(req);
291 1499 : return tevent_req_post(req, ev);
292 : }
293 :
294 4168 : smbd_smb1_do_locks_try(req);
295 4168 : if (!tevent_req_is_in_progress(req)) {
296 3676 : return tevent_req_post(req, ev);
297 : }
298 :
299 492 : ok = smbd_smb1_fsp_add_blocked_lock_req(fsp, req);
300 492 : if (!ok) {
301 0 : tevent_req_oom(req);
302 0 : return tevent_req_post(req, ev);
303 : }
304 492 : tevent_req_set_cleanup_fn(req, smbd_smb1_blocked_locks_cleanup);
305 492 : return req;
306 : }
307 :
308 984 : static void smbd_smb1_blocked_locks_cleanup(
309 : struct tevent_req *req, enum tevent_req_state req_state)
310 : {
311 984 : struct smbd_smb1_do_locks_state *state = tevent_req_data(
312 : req, struct smbd_smb1_do_locks_state);
313 984 : struct files_struct *fsp = state->fsp;
314 984 : struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
315 984 : size_t num_blocked = talloc_array_length(blocked);
316 : size_t i;
317 :
318 984 : DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
319 : req,
320 : state,
321 : (int)req_state);
322 :
323 984 : if (req_state == TEVENT_REQ_RECEIVED) {
324 492 : DBG_DEBUG("already received\n");
325 490 : return;
326 : }
327 :
328 546 : for (i=0; i<num_blocked; i++) {
329 548 : if (blocked[i] == req) {
330 490 : break;
331 : }
332 : }
333 492 : SMB_ASSERT(i<num_blocked);
334 :
335 492 : ARRAY_DEL_ELEMENT(blocked, i, num_blocked);
336 :
337 492 : fsp->blocked_smb1_lock_reqs = talloc_realloc(
338 : fsp, blocked, struct tevent_req *, num_blocked-1);
339 : }
340 :
341 146 : static NTSTATUS smbd_smb1_do_locks_check_blocked(
342 : uint16_t num_blocked,
343 : struct smbd_lock_element *blocked,
344 : uint16_t num_locks,
345 : struct smbd_lock_element *locks,
346 : uint16_t *blocker_idx,
347 : uint64_t *blocking_smblctx)
348 : {
349 : uint16_t li;
350 :
351 206 : for (li=0; li < num_locks; li++) {
352 146 : struct smbd_lock_element *l = &locks[li];
353 : uint16_t bi;
354 : bool valid;
355 :
356 146 : valid = byte_range_valid(l->offset, l->count);
357 146 : if (!valid) {
358 0 : return NT_STATUS_INVALID_LOCK_RANGE;
359 : }
360 :
361 242 : for (bi = 0; bi < num_blocked; bi++) {
362 182 : struct smbd_lock_element *b = &blocked[li];
363 : bool overlap;
364 :
365 : /* Read locks never conflict. */
366 182 : if (l->brltype == READ_LOCK && b->brltype == READ_LOCK) {
367 8 : continue;
368 : }
369 :
370 174 : overlap = byte_range_overlap(l->offset,
371 : l->count,
372 : b->offset,
373 : b->count);
374 174 : if (!overlap) {
375 88 : continue;
376 : }
377 :
378 86 : *blocker_idx = li;
379 86 : *blocking_smblctx = b->smblctx;
380 86 : return NT_STATUS_LOCK_NOT_GRANTED;
381 : }
382 : }
383 :
384 60 : return NT_STATUS_OK;
385 : }
386 :
/*
 * Decide whether a lock request may proceed.  First it is checked
 * against all older pending blocked requests on the same fsp (to
 * preserve FIFO fairness); only if none of those conflict do we try
 * to actually grab the ranges via smbd_do_locks_try().
 */
static NTSTATUS smbd_smb1_do_locks_check(
	struct files_struct *fsp,
	enum brl_flavour lock_flav,
	uint16_t num_locks,
	struct smbd_lock_element *locks,
	uint16_t *blocker_idx,
	struct server_id *blocking_pid,
	uint64_t *blocking_smblctx)
{
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	NTSTATUS status;
	size_t bi;

	/*
	 * We check the pending/blocked requests
	 * from the oldest to the youngest request.
	 *
	 * Note due to the retry logic the current request
	 * might already be in the list.
	 */

	for (bi = 0; bi < num_blocked; bi++) {
		struct smbd_smb1_do_locks_state *blocked_state =
			tevent_req_data(blocked[bi],
			struct smbd_smb1_do_locks_state);

		/* Identify ourselves by the shared locks array pointer. */
		if (blocked_state->locks == locks) {
			SMB_ASSERT(blocked_state->num_locks == num_locks);
			SMB_ASSERT(blocked_state->lock_flav == lock_flav);

			/*
			 * We found ourself...
			 */
			break;
		}

		status = smbd_smb1_do_locks_check_blocked(
			blocked_state->num_locks,
			blocked_state->locks,
			num_locks,
			locks,
			blocker_idx,
			blocking_smblctx);
		if (!NT_STATUS_IS_OK(status)) {
			/*
			 * An older pending request conflicts; its
			 * owner is this smbd process, so report our
			 * own server id as the blocker.
			 */
			*blocking_pid = messaging_server_id(
				fsp->conn->sconn->msg_ctx);
			return status;
		}
	}

	/* No conflict with older pending requests: try the real locks. */
	status = smbd_do_locks_try(
		fsp,
		lock_flav,
		num_locks,
		locks,
		blocker_idx,
		blocking_pid,
		blocking_smblctx);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	return NT_STATUS_OK;
}
452 :
/*
 * One attempt to satisfy the lock request.  On success or fatal
 * failure the tevent request is finished here; on a lock conflict or
 * backend NT_STATUS_RETRY a share_mode watch (plus an optional timer)
 * is set up and this function is re-entered from
 * smbd_smb1_do_locks_retry().
 */
static void smbd_smb1_do_locks_try(struct tevent_req *req)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	struct files_struct *fsp = state->fsp;
	struct share_mode_lock *lck;
	struct timeval endtime = { 0 };
	struct server_id blocking_pid = { 0 };
	uint64_t blocking_smblctx = 0;
	struct tevent_req *subreq = NULL;
	NTSTATUS status;
	bool ok;
	bool expired;

	lck = get_existing_share_mode_lock(state, fsp->file_id);
	if (tevent_req_nomem(lck, req)) {
		DBG_DEBUG("Could not get share mode lock\n");
		return;
	}

	status = smbd_smb1_do_locks_check(
		fsp,
		state->lock_flav,
		state->num_locks,
		state->locks,
		&state->blocker,
		&blocking_pid,
		&blocking_smblctx);
	if (NT_STATUS_IS_OK(status)) {
		goto done;
	}
	if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/*
		 * We got NT_STATUS_RETRY,
		 * we reset polling_msecs so that
		 * the retries based on LOCK_NOT_GRANTED
		 * will later start with small intervals again.
		 */
		state->polling_msecs = 0;

		/*
		 * The backend wasn't able to decide yet.
		 * We need to wait even for non-blocking
		 * locks.
		 *
		 * The backend uses blocking_smblctx == UINT64_MAX
		 * to indicate that we should use retry timers.
		 *
		 * It uses blocking_smblctx == 0 to indicate
		 * it will use share_mode_wakeup_waiters()
		 * to wake us. Note that unrelated changes in
		 * locking.tdb may cause retries.
		 */

		if (blocking_smblctx != UINT64_MAX) {
			SMB_ASSERT(blocking_smblctx == 0);
			goto setup_retry;
		}

		smbd_smb1_do_locks_update_retry_msecs(state);

		DBG_DEBUG("Waiting for a backend decision. "
			  "Retry in %"PRIu32" msecs\n",
			  state->retry_msecs);

		/*
		 * We completely ignore state->endtime here
		 * as we'll wait for a backend decision forever.
		 * If the backend is smart enough to implement
		 * some NT_STATUS_RETRY logic, it has to
		 * switch to any other status after in order
		 * to avoid waiting forever.
		 */
		endtime = timeval_current_ofs_msec(state->retry_msecs);
		goto setup_retry;
	}
	if (!ERROR_WAS_LOCK_DENIED(status)) {
		/* Fatal failure (e.g. invalid lock range): finish now. */
		goto done;
	}
	/*
	 * We got LOCK_NOT_GRANTED, make sure
	 * a following STATUS_RETRY will start
	 * with short intervals again.
	 */
	state->retry_msecs = 0;

	smbd_smb1_do_locks_setup_timeout(state, &state->locks[state->blocker]);
	DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
		  state->timeout,
		  blocking_smblctx);

	/*
	 * The client specified timeout expired
	 * avoid further retries.
	 *
	 * Otherwise keep waiting either waiting
	 * for changes in locking.tdb or the polling
	 * mode timers waiting for posix locks.
	 *
	 * If the endtime is not expired yet,
	 * it means we'll retry after a timeout.
	 * In that case we'll have to return
	 * NT_STATUS_FILE_LOCK_CONFLICT
	 * instead of NT_STATUS_LOCK_NOT_GRANTED.
	 */
	expired = timeval_expired(&state->endtime);
	if (expired) {
		status = state->deny_status;
		goto done;
	}
	state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;

	endtime = state->endtime;

	if (blocking_smblctx == UINT64_MAX) {
		struct timeval tmp;

		smbd_smb1_do_locks_update_polling_msecs(state);

		DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
			  state->polling_msecs);

		tmp = timeval_current_ofs_msec(state->polling_msecs);
		endtime = timeval_min(&endtime, &tmp);
	}

setup_retry:
	subreq = share_mode_watch_send(
		state, state->ev, lck, blocking_pid);
	if (tevent_req_nomem(subreq, req)) {
		goto done;
	}
	/* The watch holds what it needs; drop our share mode lock. */
	TALLOC_FREE(lck);
	tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);

	if (timeval_is_zero(&endtime)) {
		/* No deadline: wait for locking.tdb changes only. */
		return;
	}

	ok = tevent_req_set_endtime(subreq, state->ev, endtime);
	if (!ok) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}
	return;
done:
	TALLOC_FREE(lck);
	smbd_smb1_brl_finish_by_req(req, status);
}
602 :
603 556 : static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
604 : {
605 556 : struct tevent_req *req = tevent_req_callback_data(
606 : subreq, struct tevent_req);
607 556 : struct smbd_smb1_do_locks_state *state = tevent_req_data(
608 : req, struct smbd_smb1_do_locks_state);
609 : NTSTATUS status;
610 : bool ok;
611 :
612 : /*
613 : * Make sure we run as the user again
614 : */
615 556 : ok = change_to_user_and_service_by_fsp(state->fsp);
616 556 : if (!ok) {
617 0 : tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
618 0 : return;
619 : }
620 :
621 556 : status = share_mode_watch_recv(subreq, NULL, NULL);
622 556 : TALLOC_FREE(subreq);
623 :
624 556 : DBG_DEBUG("share_mode_watch_recv returned %s\n",
625 : nt_errstr(status));
626 :
627 : /*
628 : * We ignore any errors here, it's most likely
629 : * we just get NT_STATUS_OK or NT_STATUS_IO_TIMEOUT.
630 : *
631 : * In any case we can just give it a retry.
632 : */
633 :
634 556 : smbd_smb1_do_locks_try(req);
635 : }
636 :
637 5667 : NTSTATUS smbd_smb1_do_locks_recv(struct tevent_req *req)
638 : {
639 5667 : struct smbd_smb1_do_locks_state *state = tevent_req_data(
640 : req, struct smbd_smb1_do_locks_state);
641 5667 : NTSTATUS status = NT_STATUS_OK;
642 : bool err;
643 :
644 5667 : err = tevent_req_is_nterror(req, &status);
645 :
646 5667 : DBG_DEBUG("err=%d, status=%s\n", (int)err, nt_errstr(status));
647 :
648 5667 : if (tevent_req_is_nterror(req, &status)) {
649 2206 : struct files_struct *fsp = state->fsp;
650 2206 : struct smbd_lock_element *blocker =
651 2206 : &state->locks[state->blocker];
652 :
653 2206 : DBG_DEBUG("Setting lock_failure_offset=%"PRIu64"\n",
654 : blocker->offset);
655 :
656 2206 : fsp->fsp_flags.lock_failure_seen = true;
657 2206 : fsp->lock_failure_offset = blocker->offset;
658 2206 : return status;
659 : }
660 :
661 3461 : tevent_req_received(req);
662 :
663 3461 : return NT_STATUS_OK;
664 : }
665 :
666 5667 : bool smbd_smb1_do_locks_extract_smbreq(
667 : struct tevent_req *req,
668 : TALLOC_CTX *mem_ctx,
669 : struct smb_request **psmbreq)
670 : {
671 5667 : struct smbd_smb1_do_locks_state *state = tevent_req_data(
672 : req, struct smbd_smb1_do_locks_state);
673 :
674 5667 : DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
675 : req,
676 : state,
677 : state->smbreq);
678 :
679 5667 : if (state->smbreq == NULL) {
680 0 : return false;
681 : }
682 5667 : *psmbreq = talloc_move(mem_ctx, &state->smbreq);
683 5667 : return true;
684 : }
685 :
686 4164 : void smbd_smb1_brl_finish_by_req(struct tevent_req *req, NTSTATUS status)
687 : {
688 4164 : DBG_DEBUG("req=%p, status=%s\n", req, nt_errstr(status));
689 :
690 4164 : if (NT_STATUS_IS_OK(status)) {
691 1962 : tevent_req_done(req);
692 : } else {
693 2202 : tevent_req_nterror(req, status);
694 : }
695 4164 : }
696 :
697 1589 : bool smbd_smb1_brl_finish_by_lock(
698 : struct files_struct *fsp,
699 : bool large_offset,
700 : enum brl_flavour lock_flav,
701 : struct smbd_lock_element lock,
702 : NTSTATUS finish_status)
703 : {
704 1589 : struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
705 1589 : size_t num_blocked = talloc_array_length(blocked);
706 : size_t i;
707 :
708 1589 : DBG_DEBUG("num_blocked=%zu\n", num_blocked);
709 :
710 1646 : for (i=0; i<num_blocked; i++) {
711 74 : struct tevent_req *req = blocked[i];
712 74 : struct smbd_smb1_do_locks_state *state = tevent_req_data(
713 : req, struct smbd_smb1_do_locks_state);
714 : uint16_t j;
715 :
716 74 : DBG_DEBUG("i=%zu, req=%p\n", i, req);
717 :
718 144 : if ((state->large_offset != large_offset) ||
719 70 : (state->lock_flav != lock_flav)) {
720 4 : continue;
721 : }
722 :
723 162 : for (j=0; j<state->num_locks; j++) {
724 104 : struct smbd_lock_element *l = &state->locks[j];
725 :
726 118 : if ((lock.smblctx == l->smblctx) &&
727 26 : (lock.offset == l->offset) &&
728 12 : (lock.count == l->count)) {
729 12 : smbd_smb1_brl_finish_by_req(
730 : req, finish_status);
731 12 : return true;
732 : }
733 : }
734 : }
735 1572 : return false;
736 : }
737 :
738 752 : static struct files_struct *smbd_smb1_brl_finish_by_mid_fn(
739 : struct files_struct *fsp, void *private_data)
740 : {
741 752 : struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
742 752 : size_t num_blocked = talloc_array_length(blocked);
743 752 : uint64_t mid = *((uint64_t *)private_data);
744 : size_t i;
745 :
746 752 : DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp, num_blocked);
747 :
748 752 : for (i=0; i<num_blocked; i++) {
749 4 : struct tevent_req *req = blocked[i];
750 4 : struct smbd_smb1_do_locks_state *state = tevent_req_data(
751 : req, struct smbd_smb1_do_locks_state);
752 4 : struct smb_request *smbreq = state->smbreq;
753 :
754 4 : if (smbreq->mid == mid) {
755 4 : tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
756 4 : return fsp;
757 : }
758 : }
759 :
760 748 : return NULL;
761 : }
762 :
/*
 * This walks the list of fsps; we store the blocked reqs attached to
 * them. It can be expensive, but this is legacy SMB1 and, trying to
 * remember looking at traces, I don't recall many of those calls.
 */
768 :
769 64 : bool smbd_smb1_brl_finish_by_mid(
770 : struct smbd_server_connection *sconn, uint64_t mid)
771 : {
772 64 : struct files_struct *found = files_forall(
773 : sconn, smbd_smb1_brl_finish_by_mid_fn, &mid);
774 64 : return (found != NULL);
775 : }
|