async_std/io/read/read_to_end.rs

use std::pin::Pin;
use std::future::Future;

use crate::io::{self, Read};
use crate::task::{Context, Poll};

// Future returned by `read_to_end`-style methods: it borrows the reader and the
// destination buffer, and remembers the buffer's length at the start of the call
// so the final count covers only the newly appended bytes.
#[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct ReadToEndFuture<'a, T: Unpin + ?Sized> {
    pub(crate) reader: &'a mut T,
    pub(crate) buf: &'a mut Vec<u8>,
    pub(crate) start_len: usize,
}

impl<T: Read + Unpin + ?Sized> Future for ReadToEndFuture<'_, T> {
    type Output = io::Result<usize>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let Self {
            reader,
            buf,
            start_len,
        } = &mut *self;
        read_to_end_internal(Pin::new(reader), cx, buf, *start_len)
    }
}
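
// Note: the `read_to_end` extension method in the parent module constructs this
// future roughly as `ReadToEndFuture { reader: self, buf, start_len: buf.len() }`,
// which is why the value resolved below is `g.len - start_len`: only the bytes
// appended by this call are counted.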

// This uses an adaptive system to extend the vector when it fills. We want to
// avoid paying to allocate and zero a huge chunk of memory if the reader only
// has 4 bytes while still making large reads if the reader does have a ton
// of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every
// time is 4,500 times (!) slower than this if the reader has a very small
// amount of data to return.
//
// Because we're extending the buffer with uninitialized data for trusted
// readers, we need to make sure to truncate that if any of this panics.
pub fn read_to_end_internal<R: Read + ?Sized>(
    mut rd: Pin<&mut R>,
    cx: &mut Context<'_>,
    buf: &mut Vec<u8>,
    start_len: usize,
) -> Poll<io::Result<usize>> {
    // Restores the buffer to a known-good length on drop, so that a panic (or an
    // early return via `ready!` on `Pending`) never leaves bytes past `len`
    // exposed before they have been filled by the reader.
    struct Guard<'a> {
        buf: &'a mut Vec<u8>,
        len: usize,
    }

    impl Drop for Guard<'_> {
        fn drop(&mut self) {
            unsafe {
                self.buf.set_len(self.len);
            }
        }
    }

    let mut g = Guard {
        len: buf.len(),
        buf,
    };
    let ret;
    loop {
        if g.len == g.buf.len() {
            unsafe {
                // `reserve(32)` keeps the overhead tiny for small readers, while
                // `Vec`'s amortized growth still reallocates geometrically for
                // large ones; the fresh spare capacity is initialized before it
                // is handed to the reader.
                g.buf.reserve(32);
                let capacity = g.buf.capacity();
                g.buf.set_len(capacity);
                super::initialize(&rd, &mut g.buf[g.len..]);
            }
        }

        match futures_core::ready!(rd.as_mut().poll_read(cx, &mut g.buf[g.len..])) {
            Ok(0) => {
                // EOF: report how many bytes were appended during this call.
                ret = Poll::Ready(Ok(g.len - start_len));
                break;
            }
            Ok(n) => g.len += n,
            Err(e) => {
                ret = Poll::Ready(Err(e));
                break;
            }
        }
    }

    ret
}
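
// A minimal usage sketch (added as an illustration, not part of the original
// implementation). It assumes the crate prelude exposes the `read_to_end`
// extension method and that `crate::io::Cursor` and `crate::task::block_on`
// are available, as in async-std 1.x. The adaptive growth above is an
// implementation detail; the observable contract is "append everything until
// EOF and return the number of bytes appended".
#[cfg(test)]
mod tests {
    use crate::io::Cursor;
    use crate::prelude::*;
    use crate::task;

    #[test]
    fn appends_all_bytes_and_counts_only_new_ones() {
        task::block_on(async {
            let mut reader = Cursor::new(vec![1u8, 2, 3, 4]);
            // Two pre-existing bytes: they must be preserved and excluded from
            // the returned count (`start_len` is 2 here).
            let mut buf = vec![0u8; 2];
            let n = reader.read_to_end(&mut buf).await.unwrap();
            assert_eq!(n, 4);
            assert_eq!(buf, vec![0u8, 0, 1, 2, 3, 4]);
        });
    }
}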