• R/O
  • HTTP
  • SSH
  • HTTPS

tomoyo-test1: 提交

This is a test repository.


Commit MetaInfo

修订版: 152036d1379ffd6985262743dcf6b0f9c75f83a4 (tree)
时间: 2020-05-12 04:04:52
作者: Linus Torvalds <torvalds@linu...>
Committer: Linus Torvalds

Log Message

Fixes:

- Resolve a data integrity problem with NFSD that I inadvertently
introduced last year. The change I made makes the NFS server's
duplicate reply cache ineffective when krb5i or krb5p are in use,
thus allowing the replay of non-idempotent NFS requests such as
RENAME, SETATTR, or even WRITEs.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQIcBAABAgAGBQJerCuyAAoJEDNqszNvZn+XxvAQAJmUW5412OO7mkI2IW5PDP71
ZnBAuTs4UpLBgp1VpS3ai0LYnOX9o8WLqolzGuFxGfK69ZZdh7U7fzX2aEytoTSP
KkW3dNo+NzRppWOhMBEfMBLnAu22YF+F689RvwEqd0C1AgGugaFfzlF1ECrJVpA7
g1WVhTi0ihfArhzSWTWO4LiuwjRd5TNF8gEci2j3DuHn1Hp6BagbKOv0rFdgK99X
BbK8IaEalBUjtpGAPgRU/WY/WznzhgARVeOX7Rh/P/zFdFB1G1M4kycaadBk6uaU
SHbdWBwDsYatDNuhZUI3Wv2g+DQ5LJRrjNNesLRot+kC3XD12sBCMsSI3owoz7Jt
u0s48YmOJO8uWi4kDenR9XV8bAaDmX7R/+XGZm1lethNrpBKat9EIrqSHNvqAXZ4
b3cC8/A/aCcOrWXtZnWqvJdqjx2EgL6DbcpaFheaPEekRofuiyOaAbXdlJQvzcwY
Sv4EC4ymABpQRg0si+Sya5Int7bZ9ryLZTSCMiLA+L1TnoW26XjMlGAaRqYi7Tx7
Qg4Bt400IIDE0FlE/76vE7b7YWQj7GfErA6moIyDio5AInRU9sHDFyB8iCfdpKxh
ajNl1NuEO/FSoXOGQvOo1uHD0vKvNVK21T6vQsRCT1f6JXtpiwTn6eLX4Wn9YLdI
iKqg2YXfdCbJnAuoxzGi
=hT3x
-----END PGP SIGNATURE-----

Merge tag 'nfsd-5.7-rc-2' of git://git.linux-nfs.org/projects/cel/cel-2.6

Pull nfsd fixes from Chuck Lever:

"Resolve a data integrity problem with NFSD that I inadvertently
introduced last year.
The change I made makes the NFS server's duplicate reply cache
ineffective when krb5i or krb5p are in use, thus allowing the replay
of non-idempotent NFS requests such as RENAME, SETATTR, or even
WRITEs"

* tag 'nfsd-5.7-rc-2' of git://git.linux-nfs.org/projects/cel/cel-2.6:

SUNRPC: Revert 241b1f419f0e ("SUNRPC: Remove xdr_buf_trim()")
SUNRPC: Fix GSS privacy computation of auth->au_ralign
SUNRPC: Add "@len" parameter to gss_unwrap()

更改概述

差异

--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -21,6 +21,7 @@
2121 struct gss_ctx {
2222 struct gss_api_mech *mech_type;
2323 void *internal_ctx_id;
24+ unsigned int slack, align;
2425 };
2526
2627 #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0)
@@ -66,6 +67,7 @@ u32 gss_wrap(
6667 u32 gss_unwrap(
6768 struct gss_ctx *ctx_id,
6869 int offset,
70+ int len,
6971 struct xdr_buf *inbuf);
7072 u32 gss_delete_sec_context(
7173 struct gss_ctx **ctx_id);
@@ -126,6 +128,7 @@ struct gss_api_ops {
126128 u32 (*gss_unwrap)(
127129 struct gss_ctx *ctx_id,
128130 int offset,
131+ int len,
129132 struct xdr_buf *buf);
130133 void (*gss_delete_sec_context)(
131134 void *internal_ctx_id);
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -83,7 +83,7 @@ struct gss_krb5_enctype {
8383 u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
8484 struct xdr_buf *buf,
8585 struct page **pages); /* v2 encryption function */
86- u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
86+ u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
8787 struct xdr_buf *buf, u32 *headskip,
8888 u32 *tailskip); /* v2 decryption function */
8989 };
@@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
255255 struct xdr_buf *outbuf, struct page **pages);
256256
257257 u32
258-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
258+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
259259 struct xdr_buf *buf);
260260
261261
@@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
312312 struct page **pages);
313313
314314 u32
315-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
315+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
316316 struct xdr_buf *buf, u32 *plainoffset,
317317 u32 *plainlen);
318318
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -184,6 +184,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
184184 extern void xdr_shift_buf(struct xdr_buf *, size_t);
185185 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
186186 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
187+extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
187188 extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
188189 extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
189190
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -2032,7 +2032,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
20322032 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
20332033 struct kvec *head = rqstp->rq_rcv_buf.head;
20342034 struct rpc_auth *auth = cred->cr_auth;
2035- unsigned int savedlen = rcv_buf->len;
20362035 u32 offset, opaque_len, maj_stat;
20372036 __be32 *p;
20382037
@@ -2043,9 +2042,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
20432042 offset = (u8 *)(p) - (u8 *)head->iov_base;
20442043 if (offset + opaque_len > rcv_buf->len)
20452044 goto unwrap_failed;
2046- rcv_buf->len = offset + opaque_len;
20472045
2048- maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
2046+ maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
2047+ offset + opaque_len, rcv_buf);
20492048 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
20502049 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
20512050 if (maj_stat != GSS_S_COMPLETE)
@@ -2059,10 +2058,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
20592058 */
20602059 xdr_init_decode(xdr, rcv_buf, p, rqstp);
20612060
2062- auth->au_rslack = auth->au_verfsize + 2 +
2063- XDR_QUADLEN(savedlen - rcv_buf->len);
2064- auth->au_ralign = auth->au_verfsize + 2 +
2065- XDR_QUADLEN(savedlen - rcv_buf->len);
2061+ auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
2062+ auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
2063+
20662064 return 0;
20672065 unwrap_failed:
20682066 trace_rpcgss_unwrap_failed(task);
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -851,8 +851,8 @@ out_err:
851851 }
852852
853853 u32
854-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
855- u32 *headskip, u32 *tailskip)
854+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
855+ struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
856856 {
857857 struct xdr_buf subbuf;
858858 u32 ret = 0;
@@ -881,7 +881,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
881881
882882 /* create a segment skipping the header and leaving out the checksum */
883883 xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
884- (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
884+ (len - offset - GSS_KRB5_TOK_HDR_LEN -
885885 kctx->gk5e->cksumlength));
886886
887887 nblocks = (subbuf.len + blocksize - 1) / blocksize;
@@ -926,7 +926,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
926926 goto out_err;
927927
928928 /* Get the packet's hmac value */
929- ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
929+ ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
930930 pkt_hmac, kctx->gk5e->cksumlength);
931931 if (ret)
932932 goto out_err;
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -261,7 +261,9 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
261261 }
262262
263263 static u32
264-gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
264+gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
265+ struct xdr_buf *buf, unsigned int *slack,
266+ unsigned int *align)
265267 {
266268 int signalg;
267269 int sealalg;
@@ -279,12 +281,13 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
279281 u32 conflen = kctx->gk5e->conflen;
280282 int crypt_offset;
281283 u8 *cksumkey;
284+ unsigned int saved_len = buf->len;
282285
283286 dprintk("RPC: gss_unwrap_kerberos\n");
284287
285288 ptr = (u8 *)buf->head[0].iov_base + offset;
286289 if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
287- buf->len - offset))
290+ len - offset))
288291 return GSS_S_DEFECTIVE_TOKEN;
289292
290293 if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
@@ -324,6 +327,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
324327 (!kctx->initiate && direction != 0))
325328 return GSS_S_BAD_SIG;
326329
330+ buf->len = len;
327331 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
328332 struct crypto_sync_skcipher *cipher;
329333 int err;
@@ -376,11 +380,15 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
376380 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
377381 memmove(orig_start, data_start, data_len);
378382 buf->head[0].iov_len -= (data_start - orig_start);
379- buf->len -= (data_start - orig_start);
383+ buf->len = len - (data_start - orig_start);
380384
381385 if (gss_krb5_remove_padding(buf, blocksize))
382386 return GSS_S_DEFECTIVE_TOKEN;
383387
388+ /* slack must include room for krb5 padding */
389+ *slack = XDR_QUADLEN(saved_len - buf->len);
390+ /* The GSS blob always precedes the RPC message payload */
391+ *align = *slack;
384392 return GSS_S_COMPLETE;
385393 }
386394
@@ -486,7 +494,9 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
486494 }
487495
488496 static u32
489-gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
497+gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
498+ struct xdr_buf *buf, unsigned int *slack,
499+ unsigned int *align)
490500 {
491501 time64_t now;
492502 u8 *ptr;
@@ -532,7 +542,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
532542 if (rrc != 0)
533543 rotate_left(offset + 16, buf, rrc);
534544
535- err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
545+ err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
536546 &headskip, &tailskip);
537547 if (err)
538548 return GSS_S_FAILURE;
@@ -542,7 +552,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
542552 * it against the original
543553 */
544554 err = read_bytes_from_xdr_buf(buf,
545- buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
555+ len - GSS_KRB5_TOK_HDR_LEN - tailskip,
546556 decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
547557 if (err) {
548558 dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
@@ -568,18 +578,19 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
568578 * Note that buf->head[0].iov_len may indicate the available
569579 * head buffer space rather than that actually occupied.
570580 */
571- movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
581+ movelen = min_t(unsigned int, buf->head[0].iov_len, len);
572582 movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
573- if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
574- buf->head[0].iov_len)
575- return GSS_S_FAILURE;
583+ BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
584+ buf->head[0].iov_len);
576585 memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
577586 buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
578- buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
587+ buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;
579588
580589 /* Trim off the trailing "extra count" and checksum blob */
581- buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
590+ xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
582591
592+ *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
593+ *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
583594 return GSS_S_COMPLETE;
584595 }
585596
@@ -603,7 +614,8 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
603614 }
604615
605616 u32
606-gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
617+gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
618+ int len, struct xdr_buf *buf)
607619 {
608620 struct krb5_ctx *kctx = gctx->internal_ctx_id;
609621
@@ -613,9 +625,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
613625 case ENCTYPE_DES_CBC_RAW:
614626 case ENCTYPE_DES3_CBC_RAW:
615627 case ENCTYPE_ARCFOUR_HMAC:
616- return gss_unwrap_kerberos_v1(kctx, offset, buf);
628+ return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
629+ &gctx->slack, &gctx->align);
617630 case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
618631 case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
619- return gss_unwrap_kerberos_v2(kctx, offset, buf);
632+ return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
633+ &gctx->slack, &gctx->align);
620634 }
621635 }
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -411,10 +411,11 @@ gss_wrap(struct gss_ctx *ctx_id,
411411 u32
412412 gss_unwrap(struct gss_ctx *ctx_id,
413413 int offset,
414+ int len,
414415 struct xdr_buf *buf)
415416 {
416417 return ctx_id->mech_type->gm_ops
417- ->gss_unwrap(ctx_id, offset, buf);
418+ ->gss_unwrap(ctx_id, offset, len, buf);
418419 }
419420
420421
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -906,7 +906,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
906906 if (svc_getnl(&buf->head[0]) != seq)
907907 goto out;
908908 /* trim off the mic and padding at the end before returning */
909- buf->len -= 4 + round_up_to_quad(mic.len);
909+ xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
910910 stat = 0;
911911 out:
912912 kfree(mic.data);
@@ -934,7 +934,7 @@ static int
934934 unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
935935 {
936936 u32 priv_len, maj_stat;
937- int pad, saved_len, remaining_len, offset;
937+ int pad, remaining_len, offset;
938938
939939 clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
940940
@@ -954,12 +954,8 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
954954 buf->len -= pad;
955955 fix_priv_head(buf, pad);
956956
957- /* Maybe it would be better to give gss_unwrap a length parameter: */
958- saved_len = buf->len;
959- buf->len = priv_len;
960- maj_stat = gss_unwrap(ctx, 0, buf);
957+ maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
961958 pad = priv_len - buf->len;
962- buf->len = saved_len;
963959 buf->len -= pad;
964960 /* The upper layers assume the buffer is aligned on 4-byte boundaries.
965961 * In the krb5p case, at least, the data ends up offset, so we need to
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1150,6 +1150,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
11501150 }
11511151 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
11521152
1153+/**
1154+ * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1155+ * @buf: buf to be trimmed
1156+ * @len: number of bytes to reduce "buf" by
1157+ *
1158+ * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
1159+ * that it's possible that we'll trim less than that amount if the xdr_buf is
1160+ * too small, or if (for instance) it's all in the head and the parser has
1161+ * already read too far into it.
1162+ */
1163+void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
1164+{
1165+ size_t cur;
1166+ unsigned int trim = len;
1167+
1168+ if (buf->tail[0].iov_len) {
1169+ cur = min_t(size_t, buf->tail[0].iov_len, trim);
1170+ buf->tail[0].iov_len -= cur;
1171+ trim -= cur;
1172+ if (!trim)
1173+ goto fix_len;
1174+ }
1175+
1176+ if (buf->page_len) {
1177+ cur = min_t(unsigned int, buf->page_len, trim);
1178+ buf->page_len -= cur;
1179+ trim -= cur;
1180+ if (!trim)
1181+ goto fix_len;
1182+ }
1183+
1184+ if (buf->head[0].iov_len) {
1185+ cur = min_t(size_t, buf->head[0].iov_len, trim);
1186+ buf->head[0].iov_len -= cur;
1187+ trim -= cur;
1188+ }
1189+fix_len:
1190+ buf->len -= (len - trim);
1191+}
1192+EXPORT_SYMBOL_GPL(xdr_buf_trim);
1193+
11531194 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
11541195 {
11551196 unsigned int this_len;
Show on old repository browser