1 /** @file
2 
3   A brief file description
4 
5   @section license License
6 
7   Licensed to the Apache Software Foundation (ASF) under one
8   or more contributor license agreements.  See the NOTICE file
9   distributed with this work for additional information
10   regarding copyright ownership.  The ASF licenses this file
11   to you under the Apache License, Version 2.0 (the
12   "License"); you may not use this file except in compliance
13   with the License.  You may obtain a copy of the License at
14 
15       http://www.apache.org/licenses/LICENSE-2.0
16 
17   Unless required by applicable law or agreed to in writing, software
18   distributed under the License is distributed on an "AS IS" BASIS,
19   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20   See the License for the specific language governing permissions and
21   limitations under the License.
22  */
23 
24 #pragma once
25 
26 #include "tscore/ink_platform.h"
27 #include "tscore/ink_resource.h"
28 
29 // TODO: I think we're overly aggressive here on making MIOBuffer 64-bit
30 // but not sure it's worthwhile changing anything to 32-bit honestly.
31 
32 //////////////////////////////////////////////////////////////
33 //
34 // returns 0 for DEFAULT_BUFFER_BASE_SIZE,
35 // +1 for each power of 2
36 //
37 //////////////////////////////////////////////////////////////
38 TS_INLINE int64_t
buffer_size_to_index(int64_t size,int64_t max=max_iobuffer_size)39 buffer_size_to_index(int64_t size, int64_t max = max_iobuffer_size)
40 {
41   int64_t r = max;
42 
43   while (r && BUFFER_SIZE_FOR_INDEX(r - 1) >= size) {
44     r--;
45   }
46   return r;
47 }
48 
49 TS_INLINE int64_t
iobuffer_size_to_index(int64_t size,int64_t max)50 iobuffer_size_to_index(int64_t size, int64_t max)
51 {
52   if (size > BUFFER_SIZE_FOR_INDEX(max)) {
53     return BUFFER_SIZE_INDEX_FOR_XMALLOC_SIZE(size);
54   }
55   return buffer_size_to_index(size, max);
56 }
57 
TS_INLINE int64_t
index_to_buffer_size(int64_t idx)
{
  // Map a size index back to the byte capacity it represents, according
  // to which allocation class the index encodes.
  if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(idx)) {
    return BUFFER_SIZE_FOR_INDEX(idx);
  } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(idx)) {
    return BUFFER_SIZE_FOR_XMALLOC(idx);
    // coverity[dead_error_condition]
  } else if (BUFFER_SIZE_INDEX_IS_CONSTANT(idx)) {
    return BUFFER_SIZE_FOR_CONSTANT(idx);
  }
  // Unknown index class is treated as capacity 0.
  // coverity[dead_error_line]
  return 0;
}
72 
TS_INLINE IOBufferBlock *
iobufferblock_clone(IOBufferBlock *src, int64_t offset, int64_t len)
{
  // Clone up to @a len bytes of the chain starting at @a src, skipping
  // the first @a offset readable bytes. The clones share the underlying
  // data (no copy) and expose no write space.
  IOBufferBlock *start_buf   = nullptr;
  IOBufferBlock *current_buf = nullptr;

  while (src && len >= 0) {
    char *start       = src->_start;
    char *end         = src->_end;
    int64_t max_bytes = end - start;

    // Blocks wholly inside the offset are skipped; the leftover offset
    // is carried into the next block.
    max_bytes -= offset;
    if (max_bytes <= 0) {
      offset = -max_bytes;
      src    = src->next.get();
      continue;
    }

    // Take at most the bytes remaining in this block.
    int64_t bytes = len;
    if (bytes >= max_bytes) {
      bytes = max_bytes;
    }

    IOBufferBlock *new_buf = src->clone();
    new_buf->_start += offset;
    // _buf_end == _end: the clone is read-only.
    new_buf->_buf_end = new_buf->_end = new_buf->_start + bytes;

    // Link the clone onto the tail of the result chain.
    if (!start_buf) {
      start_buf   = new_buf;
      current_buf = start_buf;
    } else {
      current_buf->next = new_buf;
      current_buf       = new_buf;
    }

    len -= bytes;
    src    = src->next.get();
    offset = 0;
  }

  return start_buf;
}
115 
TS_INLINE IOBufferBlock *
iobufferblock_skip(IOBufferBlock *b, int64_t *poffset, int64_t *plen, int64_t write)
{
  // Advance the (block, *poffset) position past @a write readable bytes
  // and debit *@a plen by the same amount. Returns the block containing
  // the new position, or nullptr if the chain was exhausted.
  int64_t offset = *poffset;
  int64_t len    = write;

  while (b && len >= 0) {
    int64_t max_bytes = b->read_avail();

    // If this block ends before the start offset, skip it
    // and adjust the offset to consume its length.
    max_bytes -= offset;
    if (max_bytes <= 0) {
      offset = -max_bytes;
      b      = b->next.get();
      continue;
    }

    if (len >= max_bytes) {
      // This whole block is consumed; continue into the next one.
      b = b->next.get();
      len -= max_bytes;
      offset = 0;
    } else {
      // The new position lands inside this block.
      offset = offset + len;
      break;
    }
  }

  *poffset = offset;
  *plen -= write;
  return b;
}
148 
TS_INLINE void
iobuffer_mem_inc(const char *_loc, int64_t _size_index)
{
  // Credit one buffer of @a _size_index to the resource tracker under the
  // tag @a _loc. No-op unless memory tracking is enabled.
  if (!res_track_memory) {
    return;
  }

  // Only fast-allocated buffers are tracked.
  if (!BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
    return;
  }

  if (!_loc) {
    _loc = "memory/IOBuffer/UNKNOWN-LOCATION";
  }
  ResourceTracker::increment(_loc, index_to_buffer_size(_size_index));
}
165 
TS_INLINE void
iobuffer_mem_dec(const char *_loc, int64_t _size_index)
{
  // Debit one buffer of @a _size_index from the resource tracker
  // (negative increment). Mirror of iobuffer_mem_inc().
  if (!res_track_memory) {
    return;
  }

  // Only fast-allocated buffers are tracked.
  if (!BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
    return;
  }
  if (!_loc) {
    _loc = "memory/IOBuffer/UNKNOWN-LOCATION";
  }
  ResourceTracker::increment(_loc, -index_to_buffer_size(_size_index));
}
181 
182 //////////////////////////////////////////////////////////////////
183 //
184 // inline functions definitions
185 //
186 //////////////////////////////////////////////////////////////////
187 //////////////////////////////////////////////////////////////////
188 //
189 //  class IOBufferData --
190 //         inline functions definitions
191 //
192 //////////////////////////////////////////////////////////////////
TS_INLINE int64_t
IOBufferData::block_size()
{
  // Byte capacity implied by this data object's size index.
  return index_to_buffer_size(_size_index);
}
198 
TS_INLINE IOBufferData *
new_IOBufferData_internal(const char *location, void *b, int64_t size, int64_t asize_index)
{
  // Wrap a caller-provided buffer @a b in an IOBufferData without copying.
  // @a size is used only for the sanity check; @a asize_index governs the
  // reported capacity.
  (void)size;
  IOBufferData *d = THREAD_ALLOC(ioDataAllocator, this_thread());
  d->_size_index  = asize_index;
  ink_assert(BUFFER_SIZE_INDEX_IS_CONSTANT(asize_index) || size <= d->block_size());
  d->_location = location;
  d->_data     = (char *)b;
  return d;
}
210 
TS_INLINE IOBufferData *
new_xmalloc_IOBufferData_internal(const char *location, void *b, int64_t size)
{
  // Wrap an ats_malloc'ed buffer; the index encodes the xmalloc size.
  return new_IOBufferData_internal(location, b, size, BUFFER_SIZE_INDEX_FOR_XMALLOC_SIZE(size));
}
216 
TS_INLINE IOBufferData *
new_IOBufferData_internal(const char *location, void *b, int64_t size)
{
  // Wrap a buffer, deriving the best-fit size index from @a size.
  return new_IOBufferData_internal(location, b, size, iobuffer_size_to_index(size));
}
222 
TS_INLINE IOBufferData *
new_IOBufferData_internal(const char *loc, int64_t size_index, AllocType type)
{
  // Allocate a fresh IOBufferData plus its backing memory per @a type.
  IOBufferData *d = THREAD_ALLOC(ioDataAllocator, this_thread());
  d->_location    = loc;
  d->alloc(size_index, type);
  return d;
}
231 
232 // IRIX has a compiler bug which prevents this function
233 // from being compiled correctly at -O3
234 // so it is DUPLICATED in IOBuffer.cc
235 // ****** IF YOU CHANGE THIS FUNCTION change that one as well.
TS_INLINE void
IOBufferData::alloc(int64_t size_index, AllocType type)
{
  // (Re)allocate the backing memory. Any existing buffer is released
  // first. NOTE: this body is intentionally duplicated in IOBuffer.cc;
  // keep both in sync.
  if (_data) {
    dealloc();
  }
  _size_index = size_index;
  _mem_type   = type;
  iobuffer_mem_inc(_location, size_index);
  switch (type) {
  case MEMALIGNED:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(size_index)) {
      _data = (char *)ioBufAllocator[size_index].alloc_void();
      // coverity[dead_error_condition]
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(size_index)) {
      // Page-aligned allocation for large (xmalloc-class) buffers.
      _data = (char *)ats_memalign(ats_pagesize(), index_to_buffer_size(size_index));
    }
    break;
  default:
  case DEFAULT_ALLOC:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(size_index)) {
      _data = (char *)ioBufAllocator[size_index].alloc_void();
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(size_index)) {
      _data = (char *)ats_malloc(BUFFER_SIZE_FOR_XMALLOC(size_index));
    }
    break;
  }
}
264 
265 // ****** IF YOU CHANGE THIS FUNCTION change that one as well.
266 
TS_INLINE void
IOBufferData::dealloc()
{
  // Release the backing memory via whichever allocator produced it, then
  // return this object to the unallocated state. Mirror of alloc().
  iobuffer_mem_dec(_location, _size_index);
  switch (_mem_type) {
  case MEMALIGNED:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
      ioBufAllocator[_size_index].free_void(_data);
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(_size_index)) {
      // ats_memalign memory is released with plain free().
      ::free((void *)_data);
    }
    break;
  default:
  case DEFAULT_ALLOC:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
      ioBufAllocator[_size_index].free_void(_data);
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(_size_index)) {
      ats_free(_data);
    }
    break;
  }
  _data       = nullptr;
  _size_index = BUFFER_SIZE_NOT_ALLOCATED;
  _mem_type   = NO_ALLOC;
}
292 
TS_INLINE void
IOBufferData::free()
{
  // Release the backing buffer and return this object to its allocator.
  dealloc();
  THREAD_FREE(this, ioDataAllocator, this_thread());
}
299 
300 //////////////////////////////////////////////////////////////////
301 //
302 //  class IOBufferBlock --
303 //         inline functions definitions
304 //
305 //////////////////////////////////////////////////////////////////
TS_INLINE IOBufferBlock *
new_IOBufferBlock_internal(const char *location)
{
  // Allocate an empty block (no data attached yet).
  IOBufferBlock *b = THREAD_ALLOC(ioBlockAllocator, this_thread());
  b->_location     = location;
  return b;
}
313 
TS_INLINE IOBufferBlock *
new_IOBufferBlock_internal(const char *location, IOBufferData *d, int64_t len, int64_t offset)
{
  // Allocate a block viewing @a len bytes of @a d starting at @a offset.
  IOBufferBlock *b = THREAD_ALLOC(ioBlockAllocator, this_thread());
  b->_location     = location;
  b->set(d, len, offset);
  return b;
}
322 
TS_INLINE
IOBufferBlock::IOBufferBlock()
{
  // Members are initialized by their declarations; nothing further to do.
  return;
}
328 
TS_INLINE void
IOBufferBlock::consume(int64_t len)
{
  // Mark @a len bytes at the front of the block as read.
  _start += len;
  ink_assert(_start <= _end);
}
335 
TS_INLINE void
IOBufferBlock::fill(int64_t len)
{
  // Mark @a len bytes past the current end as written.
  _end += len;
  ink_assert(_end <= _buf_end);
}
342 
TS_INLINE void
IOBufferBlock::reset()
{
  // Discard all content: read/write positions return to the start and
  // the data object's full capacity becomes writable.
  _end = _start = buf();
  _buf_end      = buf() + data->block_size();
}
349 
TS_INLINE void
IOBufferBlock::alloc(int64_t i)
{
  // Attach freshly allocated data of size index @a i and reset positions.
  ink_assert(BUFFER_SIZE_ALLOCATED(i));
  data = new_IOBufferData_internal(_location, i);
  reset();
}
357 
TS_INLINE void
IOBufferBlock::clear()
{
  // Drop the data reference and manually walk the downstream chain,
  // freeing blocks whose refcount we held the last reference to. The
  // manual walk avoids deep smart-pointer destruction on long chains.
  data = nullptr;

  IOBufferBlock *p = next.get();
  while (p) {
    // If our block pointer refcount dropped to zero,
    // recursively free the list.
    if (p->refcount_dec() == 0) {
      IOBufferBlock *n = p->next.detach();
      p->free();
      p = n;
    } else {
      // We don't hold the last refcount, so we are done.
      break;
    }
  }

  // Nuke the next pointer without dropping the refcount
  // because we already manually did that.
  next.detach();

  _buf_end = _end = _start = nullptr;
}
383 
TS_INLINE IOBufferBlock *
IOBufferBlock::clone() const
{
  // Shallow clone sharing the same data object. Note that _buf_end is
  // set to _end, so the clone exposes no write space.
  IOBufferBlock *b = new_IOBufferBlock_internal(_location);
  b->data          = data;
  b->_start        = _start;
  b->_end          = _end;
  b->_buf_end      = _end;
  b->_location     = _location;
  return b;
}
395 
TS_INLINE void
IOBufferBlock::dealloc()
{
  // Synonym for clear(): release data and the downstream chain.
  clear();
}
401 
TS_INLINE void
IOBufferBlock::free()
{
  // Release contents, then return this block to its allocator.
  dealloc();
  THREAD_FREE(this, ioBlockAllocator, this_thread());
}
408 
TS_INLINE void
IOBufferBlock::set_internal(void *b, int64_t len, int64_t asize_index)
{
  // Adopt caller-provided memory @a b: the data object is created in the
  // unallocated state and then pointed at @a b directly.
  data        = new_IOBufferData_internal(_location, BUFFER_SIZE_NOT_ALLOCATED);
  data->_data = (char *)b;
  iobuffer_mem_inc(_location, asize_index);
  data->_size_index = asize_index;
  reset();
  // The adopted bytes count as already written.
  _end = _start + len;
}
419 
TS_INLINE void
IOBufferBlock::set(IOBufferData *d, int64_t len, int64_t offset)
{
  // View @a len readable bytes of @a d starting at @a offset.
  data     = d;
  _start   = buf() + offset;
  _end     = _start + len;
  _buf_end = buf() + d->block_size();
}
428 
429 //////////////////////////////////////////////////////////////////
430 //
431 //  class IOBufferReader --
432 //         inline functions definitions
433 //
434 //////////////////////////////////////////////////////////////////
TS_INLINE void
IOBufferReader::skip_empty_blocks()
{
  // Advance past leading blocks wholly covered by start_offset, folding
  // each skipped block's size out of the offset.
  while (block->next && block->next->read_avail() && start_offset >= block->size()) {
    start_offset -= block->size();
    block = block->next;
  }
}
443 
TS_INLINE bool
IOBufferReader::low_water()
{
  // Delegates to the owning buffer's low-water test.
  return mbuf->low_water();
}
449 
TS_INLINE bool
IOBufferReader::high_water()
{
  // True when this reader has at least water_mark bytes available.
  return read_avail() >= mbuf->water_mark;
}
455 
TS_INLINE bool
IOBufferReader::current_low_water()
{
  // Delegates to the buffer's current (non-allocating) low-water test.
  return mbuf->current_low_water();
}
461 
TS_INLINE IOBufferBlock *
IOBufferReader::get_current_block()
{
  // The block this reader currently points at (may hold no readable data).
  return block.get();
}
467 
TS_INLINE char *
IOBufferReader::start()
{
  // Pointer to the first unconsumed byte, or nullptr with no block.
  if (!block) {
    return nullptr;
  }

  skip_empty_blocks();
  return block->start() + start_offset;
}
478 
TS_INLINE char *
IOBufferReader::end()
{
  // One past the last readable byte of the current block (not the whole
  // chain), or nullptr with no block.
  if (!block) {
    return nullptr;
  }

  skip_empty_blocks();
  return block->end();
}
489 
TS_INLINE int64_t
IOBufferReader::block_read_avail()
{
  // Readable bytes in the current block only, after the reader offset.
  if (!block) {
    return 0;
  }

  skip_empty_blocks();
  return (int64_t)(block->end() - (block->start() + start_offset));
}
500 
inline std::string_view
IOBufferReader::block_read_view()
{
  // View of the readable bytes in the current block; empty view if none.
  const char *start = this->start(); // empty blocks are skipped in here.
  return start ? std::string_view{start, static_cast<size_t>(block->end() - start)} : std::string_view{};
}
507 
508 TS_INLINE int
block_count()509 IOBufferReader::block_count()
510 {
511   int count        = 0;
512   IOBufferBlock *b = block.get();
513 
514   while (b) {
515     count++;
516     b = b->next.get();
517   }
518 
519   return count;
520 }
521 
522 TS_INLINE int64_t
read_avail()523 IOBufferReader::read_avail()
524 {
525   int64_t t        = 0;
526   IOBufferBlock *b = block.get();
527 
528   while (b) {
529     t += b->read_avail();
530     b = b->next.get();
531   }
532 
533   t -= start_offset;
534   if (size_limit != INT64_MAX && t > size_limit) {
535     t = size_limit;
536   }
537 
538   return t;
539 }
540 
541 TS_INLINE bool
is_read_avail_more_than(int64_t size)542 IOBufferReader::is_read_avail_more_than(int64_t size)
543 {
544   int64_t t        = -start_offset;
545   IOBufferBlock *b = block.get();
546 
547   while (b) {
548     t += b->read_avail();
549     if (t > size) {
550       return true;
551     }
552     b = b->next.get();
553   }
554   return false;
555 }
556 
TS_INLINE void
IOBufferReader::consume(int64_t n)
{
  // Advance the read position by @a n bytes and debit the size limit.
  start_offset += n;
  if (size_limit != INT64_MAX) {
    size_limit -= n;
  }

  ink_assert(size_limit >= 0);
  if (!block) {
    return;
  }

  // Walk forward past fully consumed blocks, keeping start_offset as the
  // residual offset within the (new) current block.
  int64_t r = block->read_avail();
  int64_t s = start_offset;
  while (r <= s && block->next && block->next->read_avail()) {
    s -= r;
    start_offset = s;
    block        = block->next;
    r            = block->read_avail();
  }
}
579 
operator [](int64_t i)580 TS_INLINE char &IOBufferReader::operator[](int64_t i)
581 {
582   static char default_ret = '\0'; // This is just to avoid compiler warnings...
583   IOBufferBlock *b        = block.get();
584 
585   i += start_offset;
586   while (b) {
587     int64_t bytes = b->read_avail();
588     if (bytes > i) {
589       return b->start()[i];
590     }
591     i -= bytes;
592     b = b->next.get();
593   }
594 
595   ink_release_assert(!"out of range");
596   // Never used, just to satisfy compilers not understanding the fatality of ink_release_assert().
597   return default_ret;
598 }
599 
TS_INLINE void
IOBufferReader::clear()
{
  // Fully detach this reader, returning it to the unallocated state.
  accessor     = nullptr;
  block        = nullptr;
  mbuf         = nullptr;
  start_offset = 0;
  size_limit   = INT64_MAX;
}
609 
TS_INLINE void
IOBufferReader::reset()
{
  // Rewind to the buffer's current write block with no offset or limit.
  block        = mbuf->_writer;
  start_offset = 0;
  size_limit   = INT64_MAX;
}
617 
618 ////////////////////////////////////////////////////////////////
619 //
620 //  class MIOBuffer --
621 //      inline functions definitions
622 //
623 ////////////////////////////////////////////////////////////////
624 inkcoreapi extern ClassAllocator<MIOBuffer> ioAllocator;
625 ////////////////////////////////////////////////////////////////
626 //
627 //  MIOBuffer::MIOBuffer()
628 //
629 //  This constructor accepts a pre-allocated memory buffer,
630 //  wraps if in a IOBufferData and IOBufferBlock structures
631 //  and sets it as the current block.
632 //  NOTE that in this case the memory buffer will not be freed
633 //  by the MIOBuffer class. It is the user responsibility to
634 //  free the memory buffer. The wrappers (MIOBufferBlock and
635 //  MIOBufferData) will be freed by this class.
636 //
637 ////////////////////////////////////////////////////////////////
TS_INLINE
MIOBuffer::MIOBuffer(void *b, int64_t bufsize, int64_t aWater_mark)
{
  // Wrap a pre-allocated buffer; the memory is NOT owned and must be
  // freed by the caller (see the banner comment above).
  _location = nullptr;
  set(b, bufsize);
  water_mark = aWater_mark;
  size_index = BUFFER_SIZE_NOT_ALLOCATED;
  return;
}
647 
TS_INLINE
MIOBuffer::MIOBuffer(int64_t default_size_index)
{
  // Empty buffer; blocks appended later default to @a default_size_index.
  clear();
  size_index = default_size_index;
  _location  = nullptr;
  return;
}
656 
TS_INLINE
MIOBuffer::MIOBuffer()
{
  // Empty buffer with no default block size configured.
  clear();
  _location = nullptr;
  return;
}
664 
TS_INLINE
MIOBuffer::~MIOBuffer()
{
  // Drop the write chain, then release every allocated reader.
  _writer = nullptr;
  dealloc_all_readers();
}
671 
TS_INLINE MIOBuffer *
new_MIOBuffer_internal(const char *location, int64_t size_index)
{
  // Allocate an MIOBuffer with one initial block of @a size_index.
  MIOBuffer *b = THREAD_ALLOC(ioAllocator, this_thread());
  b->_location = location;
  b->alloc(size_index);
  b->water_mark = 0;
  return b;
}
681 
TS_INLINE void
free_MIOBuffer(MIOBuffer *mio)
{
  // Tear down the buffer's contents and return it to its allocator.
  mio->_writer = nullptr;
  mio->dealloc_all_readers();
  THREAD_FREE(mio, ioAllocator, this_thread());
}
689 
TS_INLINE MIOBuffer *
new_empty_MIOBuffer_internal(const char *location, int64_t size_index)
{
  // Allocate an MIOBuffer with no initial block attached.
  MIOBuffer *b  = THREAD_ALLOC(ioAllocator, this_thread());
  b->size_index = size_index;
  b->water_mark = 0;
  b->_location  = location;
  return b;
}
699 
TS_INLINE void
free_empty_MIOBuffer(MIOBuffer *mio)
{
  // Return an (assumed already empty) MIOBuffer to its allocator.
  THREAD_FREE(mio, ioAllocator, this_thread());
}
705 
TS_INLINE IOBufferReader *
MIOBuffer::alloc_accessor(MIOBufferAccessor *anAccessor)
{
  // Allocate a reader slot bound to @a anAccessor. Aborts if all
  // MAX_MIOBUFFER_READERS slots are in use.
  int i;
  for (i = 0; i < MAX_MIOBUFFER_READERS; i++) {
    if (!readers[i].allocated()) {
      break;
    }
  }

  // TODO refactor code to return nullptr at some point
  ink_release_assert(i < MAX_MIOBUFFER_READERS);

  IOBufferReader *e = &readers[i];
  e->mbuf           = this;
  e->reset();
  e->accessor = anAccessor;

  return e;
}
726 
TS_INLINE IOBufferReader *
MIOBuffer::alloc_reader()
{
  // Allocate a free-standing reader slot (no accessor attached).
  // Aborts if all MAX_MIOBUFFER_READERS slots are in use.
  int i;
  for (i = 0; i < MAX_MIOBUFFER_READERS; i++) {
    if (!readers[i].allocated()) {
      break;
    }
  }

  // TODO refactor code to return nullptr at some point
  ink_release_assert(i < MAX_MIOBUFFER_READERS);

  IOBufferReader *e = &readers[i];
  e->mbuf           = this;
  e->reset();
  e->accessor = nullptr;

  return e;
}
747 
TS_INLINE int64_t
MIOBuffer::block_size()
{
  // Byte size of the blocks this buffer allocates by default.
  return index_to_buffer_size(size_index);
}
TS_INLINE IOBufferReader *
MIOBuffer::clone_reader(IOBufferReader *r)
{
  // Allocate a new reader positioned identically to @a r. Aborts if all
  // MAX_MIOBUFFER_READERS slots are in use.
  int i;
  for (i = 0; i < MAX_MIOBUFFER_READERS; i++) {
    if (!readers[i].allocated()) {
      break;
    }
  }

  // TODO refactor code to return nullptr at some point
  ink_release_assert(i < MAX_MIOBUFFER_READERS);

  IOBufferReader *e = &readers[i];
  e->mbuf           = this;
  e->accessor       = nullptr;
  e->block          = r->block;
  e->start_offset   = r->start_offset;
  e->size_limit     = r->size_limit;
  ink_assert(e->size_limit >= 0);

  return e;
}
776 
777 TS_INLINE int64_t
block_write_avail()778 MIOBuffer::block_write_avail()
779 {
780   IOBufferBlock *b = first_write_block();
781   return b ? b->write_avail() : 0;
782 }
783 
784 ////////////////////////////////////////////////////////////////
785 //
786 //  MIOBuffer::append_block()
787 //
788 //  Appends a block to writer->next and make it the current
789 //  block.
790 //  Note that the block is not appended to the end of the list.
791 //  That means that if writer->next was not null before this
792 //  call then the block that writer->next was pointing to will
793 //  have its reference count decremented and writer->next
794 //  will have a new value which is the new block.
795 //  In any case the new appended block becomes the current
796 //  block.
797 //
798 ////////////////////////////////////////////////////////////////
TS_INLINE void
MIOBuffer::append_block_internal(IOBufferBlock *b)
{
  // It would be nice to remove an empty buffer at the beginning,
  // but this breaks HTTP.
  // if (!_writer || !_writer->read_avail())
  if (!_writer) {
    _writer = b;
    init_readers();
  } else {
    ink_assert(!_writer->next || !_writer->next->read_avail());
    _writer->next = b;
    // Advance the writer to the last block of @a b's chain that
    // already contains data.
    while (b->read_avail()) {
      _writer = b;
      b       = b->next.get();
      if (!b) {
        break;
      }
    }
  }
  // Skip past full blocks so the writer points at usable write space.
  while (_writer->next && !_writer->write_avail() && _writer->next->read_avail()) {
    _writer = _writer->next;
  }
}
823 
TS_INLINE void
MIOBuffer::append_block(IOBufferBlock *b)
{
  // Append a non-empty block (chain); see the banner comment above for
  // how the writer pointer is adjusted.
  ink_assert(b->read_avail());
  append_block_internal(b);
}
830 
831 ////////////////////////////////////////////////////////////////
832 //
833 //  MIOBuffer::append_block()
834 //
835 //  Allocate a block, appends it to current->next
836 //  and make the new block the current block (writer).
837 //
838 ////////////////////////////////////////////////////////////////
TS_INLINE void
MIOBuffer::append_block(int64_t asize_index)
{
  // Allocate a fresh block of @a asize_index and make it the current
  // write block.
  ink_assert(BUFFER_SIZE_ALLOCATED(asize_index));
  IOBufferBlock *b = new_IOBufferBlock_internal(_location);
  b->alloc(asize_index);
  append_block_internal(b);
  return;
}
848 
TS_INLINE void
MIOBuffer::add_block()
{
  // Append a default-sized block unless one is already queued after the
  // current write block.
  if (this->_writer == nullptr || this->_writer->next == nullptr) {
    append_block(size_index);
  }
}
856 
TS_INLINE void
MIOBuffer::check_add_block()
{
  // Grow only when below the high-water mark and write space is low.
  if (!high_water() && current_low_water()) {
    add_block();
  }
}
864 
TS_INLINE IOBufferBlock *
MIOBuffer::get_current_block()
{
  // The block new writes would go into.
  return first_write_block();
}
870 
871 //////////////////////////////////////////////////////////////////
872 //
873 //  MIOBuffer::current_write_avail()
874 //
875 //  returns the total space available in all blocks.
876 //  This function is different than write_avail() because
877 //  it will not append a new block if there is no space
878 //  or below the watermark space available.
879 //
880 //////////////////////////////////////////////////////////////////
881 TS_INLINE int64_t
current_write_avail()882 MIOBuffer::current_write_avail()
883 {
884   int64_t t        = 0;
885   IOBufferBlock *b = _writer.get();
886   while (b) {
887     t += b->write_avail();
888     b = b->next.get();
889   }
890   return t;
891 }
892 
893 //////////////////////////////////////////////////////////////////
894 //
895 //  MIOBuffer::write_avail()
896 //
897 //  returns the number of bytes available in the current block.
898 //  If there is no current block or not enough free space in
899 //  the current block then a new block is appended.
900 //
901 //////////////////////////////////////////////////////////////////
TS_INLINE int64_t
MIOBuffer::write_avail()
{
  // Total writable bytes, appending a new block first if warranted
  // (see the banner comment above).
  check_add_block();
  return current_write_avail();
}
908 
TS_INLINE void
MIOBuffer::fill(int64_t len)
{
  // Mark @a len bytes as written, spilling across successive blocks.
  // Callers must ensure at least @a len bytes of write space exist.
  int64_t f = _writer->write_avail();
  while (f < len) {
    // Exhaust the current block and move to the next one.
    _writer->fill(f);
    len -= f;
    if (len > 0) {
      _writer = _writer->next;
    }
    f = _writer->write_avail();
  }
  _writer->fill(len);
}
923 
924 TS_INLINE int
max_block_count()925 MIOBuffer::max_block_count()
926 {
927   int maxb = 0;
928   for (auto &reader : readers) {
929     if (reader.allocated()) {
930       int c = reader.block_count();
931       if (c > maxb) {
932         maxb = c;
933       }
934     }
935   }
936   return maxb;
937 }
938 
939 TS_INLINE int64_t
max_read_avail()940 MIOBuffer::max_read_avail()
941 {
942   int64_t s = 0;
943   int found = 0;
944   for (auto &reader : readers) {
945     if (reader.allocated()) {
946       int64_t ss = reader.read_avail();
947       if (ss > s) {
948         s = ss;
949       }
950       found = 1;
951     }
952   }
953   if (!found && _writer) {
954     return _writer->read_avail();
955   }
956   return s;
957 }
958 
TS_INLINE void
MIOBuffer::set(void *b, int64_t len)
{
  // Use caller-owned memory @a b as the single, constant-size block.
  _writer = new_IOBufferBlock_internal(_location);
  _writer->set_internal(b, len, BUFFER_SIZE_INDEX_FOR_CONSTANT_SIZE(len));
  init_readers();
}
966 
TS_INLINE void
MIOBuffer::append_xmalloced(void *b, int64_t len)
{
  // Append a block wrapping ats_malloc'ed memory @a b (ownership taken).
  IOBufferBlock *x = new_IOBufferBlock_internal(_location);
  x->set_internal(b, len, BUFFER_SIZE_INDEX_FOR_XMALLOC_SIZE(len));
  append_block_internal(x);
}
974 
TS_INLINE void
MIOBuffer::append_fast_allocated(void *b, int64_t len, int64_t fast_size_index)
{
  // Append a block wrapping memory from the fast allocator bucket
  // @a fast_size_index (ownership taken).
  IOBufferBlock *x = new_IOBufferBlock_internal(_location);
  x->set_internal(b, len, fast_size_index);
  append_block_internal(x);
}
982 
TS_INLINE void
MIOBuffer::alloc(int64_t i)
{
  // Replace the write chain with one fresh block of size index @a i and
  // reset all readers to it.
  _writer = new_IOBufferBlock_internal(_location);
  _writer->alloc(i);
  size_index = i;
  init_readers();
}
991 
TS_INLINE void
MIOBuffer::dealloc_reader(IOBufferReader *e)
{
  // Release reader @a e, detaching any accessor bound to it first.
  if (e->accessor) {
    ink_assert(e->accessor->writer() == this);
    ink_assert(e->accessor->reader() == e);
    e->accessor->clear();
  }
  e->clear();
}
1002 
TS_INLINE IOBufferReader *
IOBufferReader::clone()
{
  // New reader on the same buffer at the same position.
  return mbuf->clone_reader(this);
}
1008 
TS_INLINE void
IOBufferReader::dealloc()
{
  // Hand this reader back to its owning buffer.
  mbuf->dealloc_reader(this);
}
1014 
1015 TS_INLINE void
dealloc_all_readers()1016 MIOBuffer::dealloc_all_readers()
1017 {
1018   for (auto &reader : readers) {
1019     if (reader.allocated()) {
1020       dealloc_reader(&reader);
1021     }
1022   }
1023 }
1024 
1025 TS_INLINE void
reader_for(MIOBuffer * abuf)1026 MIOBufferAccessor::reader_for(MIOBuffer *abuf)
1027 {
1028   mbuf = abuf;
1029   if (abuf) {
1030     entry = mbuf->alloc_accessor(this);
1031   } else {
1032     entry = nullptr;
1033   }
1034 }
1035 
TS_INLINE void
MIOBufferAccessor::reader_for(IOBufferReader *areader)
{
  // Bind to an existing reader (and its buffer); no-op if already bound.
  if (entry == areader) {
    return;
  }
  mbuf  = areader->mbuf;
  entry = areader;
  ink_assert(mbuf);
}
1046 
TS_INLINE void
MIOBufferAccessor::writer_for(MIOBuffer *abuf)
{
  // Bind as a writer: track the buffer but hold no reader.
  mbuf  = abuf;
  entry = nullptr;
}
1053 
// Trivial destructor: the accessor holds only non-owning pointers.
TS_INLINE
MIOBufferAccessor::~MIOBufferAccessor() {}
1056