Line data Source code
1 : /*
2 : Unix SMB/CIFS implementation.
3 : Core SMB2 server
4 :
5 : Copyright (C) Stefan Metzmacher 2009
6 : Copyright (C) Jeremy Allison 2010
7 :
8 : This program is free software; you can redistribute it and/or modify
9 : it under the terms of the GNU General Public License as published by
10 : the Free Software Foundation; either version 3 of the License, or
11 : (at your option) any later version.
12 :
13 : This program is distributed in the hope that it will be useful,
14 : but WITHOUT ANY WARRANTY; without even the implied warranty of
15 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 : GNU General Public License for more details.
17 :
18 : You should have received a copy of the GNU General Public License
19 : along with this program. If not, see <http://www.gnu.org/licenses/>.
20 : */
21 :
22 : #include "includes.h"
23 : #include "locking/share_mode_lock.h"
24 : #include "smbd/smbd.h"
25 : #include "smbd/globals.h"
26 : #include "../libcli/smb/smb_common.h"
27 : #include "../lib/util/tevent_ntstatus.h"
28 : #include "lib/dbwrap/dbwrap_watch.h"
29 : #include "librpc/gen_ndr/open_files.h"
30 : #include "messages.h"
31 :
32 : #undef DBGC_CLASS
33 : #define DBGC_CLASS DBGC_SMB2
34 :
 : /*
 :  * One lock element as parsed off the wire: a byte range
 :  * (offset/length) plus the SMB2_LOCK_FLAG_* flags.
 :  */
 35 : struct smbd_smb2_lock_element {
 36 : uint64_t offset;
 37 : uint64_t length;
 38 : uint32_t flags;
 39 : };
 40 :
 : /*
 :  * Async state for one in-flight SMB2 LOCK request.
 :  */
 41 : struct smbd_smb2_lock_state {
 42 : struct tevent_context *ev;
 43 : struct smbd_smb2_request *smb2req;
 44 : struct smb_request *smb1req;
 45 : struct files_struct *fsp;
 : /* true only for a single waiting (non FAIL_IMMEDIATELY) lock element */
 46 : bool blocking;
 : /* back-off intervals, grown by smbd_smb2_lock_update_*_msecs() */
 47 : uint32_t polling_msecs;
 48 : uint32_t retry_msecs;
 49 : uint16_t lock_count;
 50 : struct smbd_lock_element *locks;
 : /* lock sequence (replay detection) per [MS-SMB2] 3.3.5.14,
 :  * see smbd_smb2_lock_send()/smbd_smb2_lock_cleanup() */
 51 : uint8_t lock_sequence_value;
 52 : uint8_t *lock_sequence_element;
 53 : };
54 :
55 : static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
56 : struct tevent_context *ev,
57 : struct smbd_smb2_request *smb2req,
58 : struct files_struct *in_fsp,
59 : uint32_t in_lock_sequence,
60 : uint16_t in_lock_count,
61 : struct smbd_smb2_lock_element *in_locks);
62 : static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
63 :
64 : static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
 : /*
 :  * Parse an SMB2 LOCK request: validate sizes, pull the lock elements
 :  * out of the fixed body and dynamic buffer, resolve the file handle
 :  * and kick off the async smbd_smb2_lock_send() work.
 :  */
 65 1937 : NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
 66 : {
 67 : const uint8_t *inbody;
 68 : uint16_t in_lock_count;
 69 : uint32_t in_lock_sequence;
 70 : uint64_t in_file_id_persistent;
 71 : uint64_t in_file_id_volatile;
 72 : struct files_struct *in_fsp;
 73 : struct smbd_smb2_lock_element *in_locks;
 74 : struct tevent_req *subreq;
 75 : const uint8_t *lock_buffer;
 76 : uint16_t l;
 77 : NTSTATUS status;
 78 :
 79 1937 : status = smbd_smb2_request_verify_sizes(req, 0x30);
 80 1937 : if (!NT_STATUS_IS_OK(status)) {
 81 8 : return smbd_smb2_request_error(req, status);
 82 : }
 83 1929 : inbody = SMBD_SMB2_IN_BODY_PTR(req);
 84 :
 85 1929 : in_lock_count = CVAL(inbody, 0x02);
 : /* The LockSequence field is only defined from SMB 2.1 onwards. */
 86 1929 : if (req->xconn->protocol >= PROTOCOL_SMB2_10) {
 87 1929 : in_lock_sequence = IVAL(inbody, 0x04);
 88 : } else {
 89 : /* 0x04 - 4 bytes reserved */
 90 0 : in_lock_sequence = 0;
 91 : }
 92 1929 : in_file_id_persistent = BVAL(inbody, 0x08);
 93 1929 : in_file_id_volatile = BVAL(inbody, 0x10);
 94 :
 95 1929 : if (in_lock_count < 1) {
 96 0 : return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
 97 : }
 98 :
 : /* The first element lives in the fixed body; all further
 :  * 0x18-byte elements must fit into the dynamic buffer. */
 99 1929 : if (((in_lock_count - 1) * 0x18) > SMBD_SMB2_IN_DYN_LEN(req)) {
 100 0 : return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
 101 : }
 102 :
 103 1929 : in_locks = talloc_array(req, struct smbd_smb2_lock_element,
 104 : in_lock_count);
 105 1929 : if (in_locks == NULL) {
 106 0 : return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
 107 : }
 108 :
 109 1929 : l = 0;
 110 1929 : lock_buffer = inbody + 0x18;
 111 :
 112 1929 : in_locks[l].offset = BVAL(lock_buffer, 0x00);
 113 1929 : in_locks[l].length = BVAL(lock_buffer, 0x08);
 114 1929 : in_locks[l].flags = IVAL(lock_buffer, 0x10);
 115 : /* 0x14 - 4 reserved bytes */
 116 :
 117 1929 : status = req->session->status;
 118 1929 : if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED)) {
 119 : /*
 120 : * We need to catch NT_STATUS_NETWORK_SESSION_EXPIRED
 121 : * for lock requests only.
 122 : *
 123 : * Unlock requests still need to be processed!
 124 : *
 125 : * This means smbd_smb2_request_check_session()
 126 : * can't handle the difference and always
 127 : * allows SMB2_OP_LOCK.
 128 : */
 129 24 : if (in_locks[0].flags != SMB2_LOCK_FLAG_UNLOCK) {
 130 8 : return smbd_smb2_request_error(req, status);
 131 : }
 132 : }
 133 :
 : /* Remaining elements follow in the dynamic part of the request. */
 134 1921 : lock_buffer = SMBD_SMB2_IN_DYN_PTR(req);
 135 :
 136 1989 : for (l=1; l < in_lock_count; l++) {
 137 68 : in_locks[l].offset = BVAL(lock_buffer, 0x00);
 138 68 : in_locks[l].length = BVAL(lock_buffer, 0x08);
 139 68 : in_locks[l].flags = IVAL(lock_buffer, 0x10);
 140 : /* 0x14 - 4 reserved bytes */
 141 :
 142 68 : lock_buffer += 0x18;
 143 : }
 144 :
 145 1921 : in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
 146 1921 : if (in_fsp == NULL) {
 147 0 : return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
 148 : }
 149 :
 150 1921 : subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
 151 : req, in_fsp,
 152 : in_lock_sequence,
 153 : in_lock_count,
 154 : in_locks);
 155 1921 : if (subreq == NULL) {
 156 0 : return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
 157 : }
 158 1921 : tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
 159 :
 : /* May go async; smbd_smb2_request_pending_queue() handles the
 :  * interim response if the subreq does not finish quickly. */
 160 1921 : return smbd_smb2_request_pending_queue(req, subreq, 500);
 161 : }
162 :
 : /*
 :  * Completion callback for smbd_smb2_lock_send(): collect the result
 :  * and send either an error response or the fixed 4-byte LOCK reply.
 :  * A failure to send any response tears down the connection.
 :  */
 163 1921 : static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
 164 : {
 165 1921 : struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
 166 : struct smbd_smb2_request);
 167 : DATA_BLOB outbody;
 168 : NTSTATUS status;
 169 : NTSTATUS error; /* transport error */
 170 :
 171 1921 : status = smbd_smb2_lock_recv(subreq);
 172 1921 : TALLOC_FREE(subreq);
 173 1921 : if (!NT_STATUS_IS_OK(status)) {
 174 634 : error = smbd_smb2_request_error(smb2req, status);
 175 634 : if (!NT_STATUS_IS_OK(error)) {
 : /* Could not even send the error reply. */
 176 0 : smbd_server_connection_terminate(smb2req->xconn,
 177 : nt_errstr(error));
 178 5 : return;
 179 : }
 180 634 : return;
 181 : }
 182 :
 183 1287 : outbody = smbd_smb2_generate_outbody(smb2req, 0x04);
 184 1287 : if (outbody.data == NULL) {
 185 0 : error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
 186 0 : if (!NT_STATUS_IS_OK(error)) {
 187 0 : smbd_server_connection_terminate(smb2req->xconn,
 188 : nt_errstr(error));
 189 0 : return;
 190 : }
 191 0 : return;
 192 : }
 193 :
 194 1287 : SSVAL(outbody.data, 0x00, 0x04); /* struct size */
 195 1287 : SSVAL(outbody.data, 0x02, 0); /* reserved */
 196 :
 197 1287 : error = smbd_smb2_request_done(smb2req, outbody, NULL);
 198 1287 : if (!NT_STATUS_IS_OK(error)) {
 199 0 : smbd_server_connection_terminate(smb2req->xconn,
 200 : nt_errstr(error));
 201 0 : return;
 202 : }
 203 : }
204 :
205 : static void smbd_smb2_lock_cleanup(struct tevent_req *req,
206 : enum tevent_req_state req_state);
207 : static void smbd_smb2_lock_try(struct tevent_req *req);
208 : static void smbd_smb2_lock_retry(struct tevent_req *subreq);
209 : static bool smbd_smb2_lock_cancel(struct tevent_req *req);
210 :
 : /*
 :  * Start the async LOCK work: perform lock-sequence (replay)
 :  * detection, validate the flag combinations of all elements,
 :  * translate them into smbd_lock_element's and either do the
 :  * unlocking synchronously or try to acquire the locks, possibly
 :  * going async for a waiting (blocking) lock.
 :  */
 211 1921 : static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
 212 : struct tevent_context *ev,
 213 : struct smbd_smb2_request *smb2req,
 214 : struct files_struct *fsp,
 215 : uint32_t in_lock_sequence,
 216 : uint16_t in_lock_count,
 217 : struct smbd_smb2_lock_element *in_locks)
 218 : {
 219 : struct tevent_req *req;
 220 : struct smbd_smb2_lock_state *state;
 221 1921 : bool isunlock = false;
 222 : uint16_t i;
 223 : struct smbd_lock_element *locks;
 224 : NTSTATUS status;
 225 1921 : bool check_lock_sequence = false;
 226 1921 : uint32_t lock_sequence_bucket = 0;
 227 :
 228 1921 : req = tevent_req_create(mem_ctx, &state,
 229 : struct smbd_smb2_lock_state);
 230 1921 : if (req == NULL) {
 231 0 : return NULL;
 232 : }
 233 1921 : state->ev = ev;
 234 1921 : state->fsp = fsp;
 235 1921 : state->smb2req = smb2req;
 236 1921 : smb2req->subreq = req; /* So we can find this when going async. */
 237 :
 238 1921 : tevent_req_set_cleanup_fn(req, smbd_smb2_lock_cleanup);
 239 :
 240 1921 : state->smb1req = smbd_smb2_fake_smb_request(smb2req);
 241 1921 : if (tevent_req_nomem(state->smb1req, req)) {
 242 0 : return tevent_req_post(req, ev);
 243 : }
 244 :
 245 1921 : DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
 246 : fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));
 247 :
 248 : /*
 249 : * Windows sets check_lock_sequence = true
 250 : * only for resilient and persistent handles.
 251 : *
 252 : * [MS-SMB2] 3.3.5.14 Receiving an SMB2 LOCK Request
 253 : *
 254 : * ... if Open.IsResilient or Open.IsDurable or Open.IsPersistent is
 255 : * TRUE or if Connection.Dialect belongs to the SMB 3.x dialect family
 256 : * and Connection.ServerCapabilities includes
 257 : * SMB2_GLOBAL_CAP_MULTI_CHANNEL bit, the server SHOULD<314>
 258 : * perform lock sequence verification ...
 259 : *
 260 : * <314> Section 3.3.5.14: Windows 7 and Windows Server 2008 R2 perform
 261 : * lock sequence verification only when Open.IsResilient is TRUE.
 262 : * Windows 8 through Windows 10 v1909 and Windows Server 2012 through
 263 : * Windows Server v1909 perform lock sequence verification only when
 264 : * Open.IsResilient or Open.IsPersistent is TRUE.
 265 : *
 266 : * Note <314> also applies to all versions (at least) up to
 267 : * Windows Server v2004.
 268 : *
 269 : * Hopefully this will be fixed in future Windows versions and they
 270 : * will avoid Note <314>.
 271 : *
 272 : * We implement what the specification says by default, but
 273 : * allow "smb2 disable lock sequence checking = yes" to
 274 : * behave like Windows again.
 275 : *
 276 : * Note: that we already check the dialect before setting
 277 : * SMB2_CAP_MULTI_CHANNEL in smb2_negprot.c
 278 : */
 279 1921 : if (smb2req->xconn->smb2.server.capabilities & SMB2_CAP_MULTI_CHANNEL) {
 280 1921 : check_lock_sequence = true;
 281 : }
 282 1921 : if (fsp->op->global->durable) {
 283 112 : check_lock_sequence = true;
 284 : }
 285 :
 286 1921 : if (check_lock_sequence) {
 287 1896 : bool disable_lock_sequence_checking =
 288 25 : lp_smb2_disable_lock_sequence_checking();
 289 :
 290 1921 : if (disable_lock_sequence_checking) {
 291 0 : check_lock_sequence = false;
 292 : }
 293 : }
 294 :
 : /* Low nibble is the sequence number, the rest selects the bucket. */
 295 1921 : if (check_lock_sequence) {
 296 1921 : state->lock_sequence_value = in_lock_sequence & 0xF;
 297 1921 : lock_sequence_bucket = in_lock_sequence >> 4;
 298 : }
 299 1921 : if ((lock_sequence_bucket > 0) &&
 300 : (lock_sequence_bucket <= sizeof(fsp->op->global->lock_sequence_array)))
 301 : {
 302 144 : uint32_t idx = lock_sequence_bucket - 1;
 303 144 : uint8_t *array = fsp->op->global->lock_sequence_array;
 304 :
 305 144 : state->lock_sequence_element = &array[idx];
 306 : }
 307 :
 308 1921 : if (state->lock_sequence_element != NULL) {
 309 : /*
 310 : * The incoming 'state->lock_sequence_value' is masked with 0xF.
 311 : *
 312 : * Note per default '*state->lock_sequence_element'
 313 : * is invalid, a value of 0xFF that can never match on
 314 : * incoming value.
 315 : */
 316 144 : if (*state->lock_sequence_element == state->lock_sequence_value)
 317 : {
 318 64 : DBG_INFO("replayed smb2 lock request detected: "
 319 : "file %s, value %u, bucket %u\n",
 320 : fsp_str_dbg(fsp),
 321 : (unsigned)state->lock_sequence_value,
 322 : (unsigned)lock_sequence_bucket);
 323 64 : tevent_req_done(req);
 324 64 : return tevent_req_post(req, ev);
 325 : }
 326 : /*
 327 : * If it's not a replay, mark the element as
 328 : * invalid again.
 329 : */
 330 80 : *state->lock_sequence_element = 0xFF;
 331 : }
 332 :
 333 1857 : locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
 334 1857 : if (locks == NULL) {
 335 0 : tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
 336 0 : return tevent_req_post(req, ev);
 337 : }
 338 :
 : /* The first element decides whether this is a (possibly blocking)
 :  * lock request or an unlock request. */
 339 1857 : switch (in_locks[0].flags) {
 340 128 : case SMB2_LOCK_FLAG_SHARED:
 341 : case SMB2_LOCK_FLAG_EXCLUSIVE:
 342 128 : if (in_lock_count > 1) {
 343 0 : tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
 344 0 : return tevent_req_post(req, ev);
 345 : }
 346 128 : state->blocking = true;
 347 128 : break;
 348 :
 349 1029 : case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
 350 : case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
 351 1029 : break;
 352 :
 353 664 : case SMB2_LOCK_FLAG_UNLOCK:
 354 : /* only the first lock gives the UNLOCK bit - see
 355 : MS-SMB2 3.3.5.14 */
 356 664 : isunlock = true;
 357 664 : break;
 358 :
 359 36 : default:
 360 36 : tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
 361 36 : return tevent_req_post(req, ev);
 362 : }
 363 :
 364 1821 : if (!isunlock && (in_lock_count > 1)) {
 365 :
 366 : /*
 367 : * 3.3.5.14.2 says we SHOULD fail with INVALID_PARAMETER if we
 368 : * have more than one lock and one of those is blocking.
 369 : */
 370 :
 371 52 : for (i=0; i<in_lock_count; i++) {
 372 40 : uint32_t flags = in_locks[i].flags;
 373 :
 374 40 : if ((flags & SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) == 0) {
 375 8 : tevent_req_nterror(
 376 : req, NT_STATUS_INVALID_PARAMETER);
 377 8 : return tevent_req_post(req, ev);
 378 : }
 379 : }
 380 : }
 381 :
 : /* Validate every element and translate it into the internal
 :  * smbd_lock_element representation. */
 382 3678 : for (i=0; i<in_lock_count; i++) {
 383 1865 : bool invalid = false;
 384 :
 385 1865 : switch (in_locks[i].flags) {
 386 128 : case SMB2_LOCK_FLAG_SHARED:
 387 : case SMB2_LOCK_FLAG_EXCLUSIVE:
 388 128 : if (isunlock) {
 389 0 : invalid = true;
 390 0 : break;
 391 : }
 392 128 : break;
 393 :
 394 1041 : case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
 395 : case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
 396 1041 : if (isunlock) {
 397 8 : invalid = true;
 398 : }
 399 1041 : break;
 400 :
 401 692 : case SMB2_LOCK_FLAG_UNLOCK:
 402 692 : if (!isunlock) {
 403 0 : tevent_req_nterror(req,
 404 : NT_STATUS_INVALID_PARAMETER);
 405 0 : return tevent_req_post(req, ev);
 406 : }
 407 692 : break;
 408 :
 409 4 : default:
 410 4 : if (isunlock) {
 411 : /*
 412 : * If the first element was a UNLOCK
 413 : * we need to defer the error response
 414 : * to the backend, because we need to process
 415 : * all unlock elements before
 416 : */
 417 4 : invalid = true;
 418 4 : break;
 419 : }
 420 0 : tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
 421 0 : return tevent_req_post(req, ev);
 422 : }
 423 :
 424 1865 : locks[i].req_guid = smbd_request_guid(smb2req->smb1req, i);
 425 1865 : locks[i].smblctx = fsp->op->global->open_persistent_id;
 426 1865 : locks[i].offset = in_locks[i].offset;
 427 1865 : locks[i].count = in_locks[i].length;
 428 :
 429 1865 : if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
 430 1037 : locks[i].brltype = WRITE_LOCK;
 431 828 : } else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
 432 132 : locks[i].brltype = READ_LOCK;
 433 696 : } else if (invalid) {
 434 : /*
 435 : * this is an invalid UNLOCK element
 436 : * and the backend needs to test for
 437 : * brltype != UNLOCK_LOCK and return
 438 : * NT_STATUS_INVALID_PARAMETER
 439 : */
 440 4 : locks[i].brltype = READ_LOCK;
 441 : } else {
 442 692 : locks[i].brltype = UNLOCK_LOCK;
 443 : }
 444 :
 445 1865 : DBG_DEBUG("index %"PRIu16" offset=%"PRIu64", count=%"PRIu64", "
 446 : "smblctx = %"PRIu64" type %d\n",
 447 : i,
 448 : locks[i].offset,
 449 : locks[i].count,
 450 : locks[i].smblctx,
 451 : (int)locks[i].brltype);
 452 : }
 453 :
 454 1813 : state->locks = locks;
 455 1813 : state->lock_count = in_lock_count;
 456 :
 : /* Unlocks are always processed synchronously. */
 457 1813 : if (isunlock) {
 458 1315 : status = smbd_do_unlocking(
 459 664 : state->smb1req, fsp, in_lock_count, locks, WINDOWS_LOCK);
 460 :
 461 664 : if (tevent_req_nterror(req, status)) {
 462 124 : return tevent_req_post(req, ev);
 463 : }
 464 540 : tevent_req_done(req);
 465 540 : return tevent_req_post(req, ev);
 466 : }
 467 :
 468 1149 : smbd_smb2_lock_try(req);
 469 1149 : if (!tevent_req_is_in_progress(req)) {
 470 1113 : return tevent_req_post(req, ev);
 471 : }
 472 :
 : /* Going async: make the request cancellable and tie its lifetime
 :  * to the fsp. */
 473 36 : tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);
 474 36 : aio_add_req_to_fsp(state->fsp, req);
 475 36 : tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);
 476 :
 477 36 : return req;
 478 : }
479 :
 : /*
 :  * Cleanup hook: on successful completion remember the lock sequence
 :  * value in the handle's bucket so a replayed request can be detected
 :  * by smbd_smb2_lock_send().
 :  */
 480 3842 : static void smbd_smb2_lock_cleanup(struct tevent_req *req,
 481 : enum tevent_req_state req_state)
 482 : {
 483 3842 : struct smbd_smb2_lock_state *state = tevent_req_data(
 484 : req, struct smbd_smb2_lock_state);
 485 :
 486 3842 : if (req_state != TEVENT_REQ_DONE) {
 487 2555 : return;
 488 : }
 489 :
 490 1287 : if (state->lock_sequence_element != NULL) {
 491 : /*
 492 : * On success we remember the given/incoming
 493 : * value (which was masked with 0xF).
 494 : */
 495 84 : *state->lock_sequence_element = state->lock_sequence_value;
 496 : }
 497 : }
498 :
 : /*
 :  * Grow the retry interval used while waiting for a backend
 :  * decision (NT_STATUS_RETRY), capped at 10x the step size.
 :  */
 499 4 : static void smbd_smb2_lock_update_retry_msecs(
 500 : struct smbd_smb2_lock_state *state)
 501 : {
 502 : /*
 503 : * The default lp_lock_spin_time() is 200ms,
 504 : * we just use half of it to trigger the first retry.
 505 : *
 506 : * v_min is in the range of 0.001 to 10 secs
 507 : * (0.1 secs by default)
 508 : *
 509 : * v_max is in the range of 0.01 to 100 secs
 510 : * (1.0 secs by default)
 511 : *
 512 : * The typical steps are:
 513 : * 0.1, 0.2, 0.3, 0.4, ... 1.0
 514 : */
 515 4 : uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
 516 4 : uint32_t v_max = 10 * v_min;
 517 :
 518 4 : if (state->retry_msecs >= v_max) {
 519 0 : state->retry_msecs = v_max;
 520 0 : return;
 521 : }
 522 :
 523 4 : state->retry_msecs += v_min;
 524 : }
525 :
 : /*
 :  * Grow the polling interval used while blocked on a posix lock
 :  * (blocking_smblctx == UINT64_MAX), capped at 10x the step size.
 :  */
 526 4 : static void smbd_smb2_lock_update_polling_msecs(
 527 : struct smbd_smb2_lock_state *state)
 528 : {
 529 : /*
 530 : * The default lp_lock_spin_time() is 200ms.
 531 : *
 532 : * v_min is in the range of 0.002 to 20 secs
 533 : * (0.2 secs by default)
 534 : *
 535 : * v_max is in the range of 0.02 to 200 secs
 536 : * (2.0 secs by default)
 537 : *
 538 : * The typical steps are:
 539 : * 0.2, 0.4, 0.6, 0.8, ... 2.0
 540 : */
 541 4 : uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
 542 4 : uint32_t v_max = 10 * v_min;
 543 :
 544 4 : if (state->polling_msecs >= v_max) {
 545 0 : state->polling_msecs = v_max;
 546 0 : return;
 547 : }
 548 :
 549 4 : state->polling_msecs += v_min;
 550 : }
551 :
 : /*
 :  * Try to acquire the requested locks under the share mode lock.
 :  * On success the request completes; otherwise either fail
 :  * immediately (non-blocking), or set up a share-mode watch and/or
 :  * a timer so smbd_smb2_lock_retry() will try again.
 :  */
 552 1165 : static void smbd_smb2_lock_try(struct tevent_req *req)
 553 : {
 554 1165 : struct smbd_smb2_lock_state *state = tevent_req_data(
 555 : req, struct smbd_smb2_lock_state);
 556 1165 : struct share_mode_lock *lck = NULL;
 557 : uint16_t blocker_idx;
 558 1165 : struct server_id blocking_pid = { 0 };
 559 : uint64_t blocking_smblctx;
 560 : NTSTATUS status;
 561 1165 : struct tevent_req *subreq = NULL;
 562 1165 : struct timeval endtime = { 0 };
 563 :
 564 1165 : lck = get_existing_share_mode_lock(
 565 1165 : talloc_tos(), state->fsp->file_id);
 566 1165 : if (tevent_req_nomem(lck, req)) {
 567 1129 : return;
 568 : }
 569 :
 570 2317 : status = smbd_do_locks_try(
 571 : state->fsp,
 572 : WINDOWS_LOCK,
 573 1165 : state->lock_count,
 574 : state->locks,
 575 : &blocker_idx,
 576 : &blocking_pid,
 577 : &blocking_smblctx);
 578 1165 : if (NT_STATUS_IS_OK(status)) {
 579 683 : TALLOC_FREE(lck);
 580 683 : tevent_req_done(req);
 581 683 : return;
 582 : }
 583 482 : if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
 584 : /*
 585 : * We got NT_STATUS_RETRY,
 586 : * we reset polling_msecs so that
 587 : * the retries based on LOCK_NOT_GRANTED
 588 : * will later start with small intervals again.
 589 : */
 590 8 : state->polling_msecs = 0;
 591 :
 592 : /*
 593 : * The backend wasn't able to decide yet.
 594 : * We need to wait even for non-blocking
 595 : * locks.
 596 : *
 597 : * The backend uses blocking_smblctx == UINT64_MAX
 598 : * to indicate that we should use retry timers.
 599 : *
 600 : * It uses blocking_smblctx == 0 to indicate
 601 : * it will use share_mode_wakeup_waiters()
 602 : * to wake us. Note that unrelated changes in
 603 : * locking.tdb may cause retries.
 604 : */
 605 :
 606 8 : if (blocking_smblctx != UINT64_MAX) {
 607 4 : SMB_ASSERT(blocking_smblctx == 0);
 608 4 : goto setup_retry;
 609 : }
 610 :
 611 4 : smbd_smb2_lock_update_retry_msecs(state);
 612 :
 613 4 : DBG_DEBUG("Waiting for a backend decision. "
 614 : "Retry in %"PRIu32" msecs\n",
 615 : state->retry_msecs);
 616 :
 617 : /*
 618 : * We completely ignore state->endtime here
 619 : * as we'll wait for a backend decision forever.
 620 : * If the backend is smart enough to implement
 621 : * some NT_STATUS_RETRY logic, it has to
 622 : * switch to any other status afterwards in order
 623 : * to avoid waiting forever.
 624 : */
 625 4 : endtime = timeval_current_ofs_msec(state->retry_msecs);
 626 4 : goto setup_retry;
 627 : }
 628 474 : if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
 629 : /*
 630 : * This is a bug and will be changed into an assert
 631 : * in a future version. We should only
 632 : * ever get NT_STATUS_LOCK_NOT_GRANTED here!
 633 : */
 634 : static uint64_t _bug_count;
 635 0 : int _level = (_bug_count++ == 0) ? DBGLVL_ERR: DBGLVL_DEBUG;
 636 0 : DBG_PREFIX(_level, ("BUG: Got %s mapping to "
 637 : "NT_STATUS_LOCK_NOT_GRANTED\n",
 638 : nt_errstr(status)));
 639 0 : status = NT_STATUS_LOCK_NOT_GRANTED;
 640 : }
 641 474 : if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
 642 12 : TALLOC_FREE(lck);
 643 12 : tevent_req_nterror(req, status);
 644 12 : return;
 645 : }
 646 : /*
 647 : * We got LOCK_NOT_GRANTED, make sure
 648 : * a following STATUS_RETRY will start
 649 : * with short intervals again.
 650 : */
 651 462 : state->retry_msecs = 0;
 652 :
 653 462 : if (!state->blocking) {
 654 434 : TALLOC_FREE(lck);
 655 434 : tevent_req_nterror(req, status);
 656 434 : return;
 657 : }
 658 :
 659 28 : if (blocking_smblctx == UINT64_MAX) {
 660 4 : smbd_smb2_lock_update_polling_msecs(state);
 661 :
 662 4 : DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
 663 : state->polling_msecs);
 664 :
 665 4 : endtime = timeval_current_ofs_msec(state->polling_msecs);
 666 : }
 667 :
 668 59 : setup_retry:
 669 36 : DBG_DEBUG("Watching share mode lock\n");
 670 :
 671 36 : subreq = share_mode_watch_send(
 672 : state, state->ev, lck, blocking_pid);
 673 36 : TALLOC_FREE(lck);
 674 36 : if (tevent_req_nomem(subreq, req)) {
 675 0 : return;
 676 : }
 677 36 : tevent_req_set_callback(subreq, smbd_smb2_lock_retry, req);
 678 :
 : /* A zero endtime means: wait for the watch only, no timer. */
 679 36 : if (!timeval_is_zero(&endtime)) {
 680 : bool ok;
 681 :
 682 8 : ok = tevent_req_set_endtime(subreq,
 683 : state->ev,
 684 : endtime);
 685 8 : if (!ok) {
 686 0 : tevent_req_oom(req);
 687 0 : return;
 688 : }
 689 : }
 690 : }
691 :
 : /*
 :  * Watch/timer callback: re-impersonate the user and retry the
 :  * lock attempt. A watch timeout (NT_STATUS_IO_TIMEOUT) is not an
 :  * error, just the trigger for a timed retry.
 :  */
 692 16 : static void smbd_smb2_lock_retry(struct tevent_req *subreq)
 693 : {
 694 16 : struct tevent_req *req = tevent_req_callback_data(
 695 : subreq, struct tevent_req);
 696 16 : struct smbd_smb2_lock_state *state = tevent_req_data(
 697 : req, struct smbd_smb2_lock_state);
 698 : NTSTATUS status;
 699 : bool ok;
 700 :
 701 : /*
 702 : * Make sure we run as the user again
 703 : */
 704 16 : ok = change_to_user_and_service_by_fsp(state->fsp);
 705 16 : if (!ok) {
 706 0 : tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
 707 0 : return;
 708 : }
 709 :
 710 16 : status = share_mode_watch_recv(subreq, NULL, NULL);
 711 16 : TALLOC_FREE(subreq);
 712 16 : if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
 713 : /*
 714 : * This is just a trigger for a timed retry.
 715 : */
 716 8 : status = NT_STATUS_OK;
 717 : }
 718 16 : if (tevent_req_nterror(req, status)) {
 719 0 : return;
 720 : }
 721 :
 722 16 : smbd_smb2_lock_try(req);
 723 : }
724 :
 : /* Collect the final status of the async lock request. */
 725 1921 : static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
 726 : {
 727 1921 : return tevent_req_simple_recv_ntstatus(req);
 728 : }
729 :
730 : /****************************************************************
731 : Cancel an outstanding blocking lock request.
732 : *****************************************************************/
733 :
734 24 : static bool smbd_smb2_lock_cancel(struct tevent_req *req)
735 : {
736 24 : struct smbd_smb2_request *smb2req = NULL;
737 24 : struct smbd_smb2_lock_state *state = tevent_req_data(req,
738 : struct smbd_smb2_lock_state);
739 24 : if (!state) {
740 0 : return false;
741 : }
742 :
743 24 : if (!state->smb2req) {
744 0 : return false;
745 : }
746 :
747 24 : smb2req = state->smb2req;
748 :
749 : /*
750 : * If the request is canceled because of close, logoff or tdis
751 : * the status is NT_STATUS_RANGE_NOT_LOCKED instead of
752 : * NT_STATUS_CANCELLED.
753 : */
754 40 : if (state->fsp->fsp_flags.closing ||
755 28 : !NT_STATUS_IS_OK(smb2req->session->status) ||
756 12 : !NT_STATUS_IS_OK(smb2req->tcon->status)) {
757 16 : tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
758 16 : return true;
759 : }
760 :
761 8 : tevent_req_nterror(req, NT_STATUS_CANCELLED);
762 8 : return true;
763 : }
|