1 /** @file
2 
3   A brief file description
4 
5   @section license License
6 
7   Licensed to the Apache Software Foundation (ASF) under one
8   or more contributor license agreements.  See the NOTICE file
9   distributed with this work for additional information
10   regarding copyright ownership.  The ASF licenses this file
11   to you under the Apache License, Version 2.0 (the
12   "License"); you may not use this file except in compliance
13   with the License.  You may obtain a copy of the License at
14 
15       http://www.apache.org/licenses/LICENSE-2.0
16 
17   Unless required by applicable law or agreed to in writing, software
18   distributed under the License is distributed on an "AS IS" BASIS,
19   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20   See the License for the specific language governing permissions and
21   limitations under the License.
22  */
23 
24 #pragma once
25 
26 #include "tscore/ink_platform.h"
27 #include "tscore/ink_resource.h"
28 
29 // TODO: I think we're overly aggressive here on making MIOBuffer 64-bit
30 // but not sure it's worthwhile changing anything to 32-bit honestly.
31 
32 //////////////////////////////////////////////////////////////
33 //
34 // returns 0 for DEFAULT_BUFFER_BASE_SIZE,
35 // +1 for each power of 2
36 //
37 //////////////////////////////////////////////////////////////
38 TS_INLINE int64_t
39 buffer_size_to_index(int64_t size, int64_t max = max_iobuffer_size)
40 {
41   int64_t r = max;
42 
43   while (r && BUFFER_SIZE_FOR_INDEX(r - 1) >= size) {
44     r--;
45   }
46   return r;
47 }
48 
49 TS_INLINE int64_t
50 iobuffer_size_to_index(int64_t size, int64_t max)
51 {
52   if (size > BUFFER_SIZE_FOR_INDEX(max)) {
53     return BUFFER_SIZE_INDEX_FOR_XMALLOC_SIZE(size);
54   }
55   return buffer_size_to_index(size, max);
56 }
57 
58 TS_INLINE int64_t
59 index_to_buffer_size(int64_t idx)
60 {
61   if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(idx)) {
62     return BUFFER_SIZE_FOR_INDEX(idx);
63   } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(idx)) {
64     return BUFFER_SIZE_FOR_XMALLOC(idx);
65     // coverity[dead_error_condition]
66   } else if (BUFFER_SIZE_INDEX_IS_CONSTANT(idx)) {
67     return BUFFER_SIZE_FOR_CONSTANT(idx);
68   }
69   // coverity[dead_error_line]
70   return 0;
71 }
72 
TS_INLINE IOBufferBlock *
iobufferblock_clone(IOBufferBlock *src, int64_t offset, int64_t len)
{
  // Clone the sub-range [offset, offset+len) of the block chain starting
  // at @a src into a new chain of shallow clones (underlying data is
  // shared; only block headers are new). Returns the head of the new
  // chain, or nullptr if nothing was cloned.
  IOBufferBlock *start_buf   = nullptr;
  IOBufferBlock *current_buf = nullptr;

  while (src && len >= 0) {
    char *start       = src->_start;
    char *end         = src->_end;
    int64_t max_bytes = end - start;

    // Skip blocks that end before the requested offset, carrying the
    // remaining offset into the next block.
    max_bytes -= offset;
    if (max_bytes <= 0) {
      offset = -max_bytes;
      src    = src->next.get();
      continue;
    }

    // Clamp to the number of bytes still wanted.
    int64_t bytes = len;
    if (bytes >= max_bytes) {
      bytes = max_bytes;
    }

    // NOTE(review): because the loop runs while len >= 0, once len hits
    // exactly 0 a zero-length clone is still appended for each remaining
    // source block — presumably intentional; confirm before changing.
    IOBufferBlock *new_buf = src->clone();
    new_buf->_start += offset;
    new_buf->_buf_end = new_buf->_end = new_buf->_start + bytes;

    // Link the clone onto the tail of the result chain.
    if (!start_buf) {
      start_buf   = new_buf;
      current_buf = start_buf;
    } else {
      current_buf->next = new_buf;
      current_buf       = new_buf;
    }

    len -= bytes;
    src    = src->next.get();
    offset = 0;
  }

  return start_buf;
}
115 
TS_INLINE IOBufferBlock *
iobufferblock_skip(IOBufferBlock *b, int64_t *poffset, int64_t *plen, int64_t write)
{
  // Advance the position (@a b, *@a poffset) forward by @a write bytes
  // over the block chain and subtract @a write from *@a plen. Returns the
  // block containing the new position (nullptr if the chain ran out).
  int64_t offset = *poffset;
  int64_t len    = write;

  while (b && len >= 0) {
    int64_t max_bytes = b->read_avail();

    // If this block ends before the start offset, skip it
    // and adjust the offset to consume its length.
    max_bytes -= offset;
    if (max_bytes <= 0) {
      offset = -max_bytes;
      b      = b->next.get();
      continue;
    }

    if (len >= max_bytes) {
      // Consume this whole block and continue with the next one.
      b = b->next.get();
      len -= max_bytes;
      offset = 0;
    } else {
      // The landing position is inside this block.
      offset = offset + len;
      break;
    }
  }

  *poffset = offset;
  *plen -= write;
  return b;
}
148 
149 #ifdef TRACK_BUFFER_USER
150 TS_INLINE void
151 iobuffer_mem_inc(const char *_loc, int64_t _size_index)
152 {
153   if (!res_track_memory) {
154     return;
155   }
156 
157   if (!BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
158     return;
159   }
160 
161   if (!_loc) {
162     _loc = "memory/IOBuffer/UNKNOWN-LOCATION";
163   }
164   ResourceTracker::increment(_loc, index_to_buffer_size(_size_index));
165 }
166 
167 TS_INLINE void
168 iobuffer_mem_dec(const char *_loc, int64_t _size_index)
169 {
170   if (!res_track_memory) {
171     return;
172   }
173 
174   if (!BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
175     return;
176   }
177   if (!_loc) {
178     _loc = "memory/IOBuffer/UNKNOWN-LOCATION";
179   }
180   ResourceTracker::increment(_loc, -index_to_buffer_size(_size_index));
181 }
182 #endif
183 
184 //////////////////////////////////////////////////////////////////
185 //
186 // inline functions definitions
187 //
188 //////////////////////////////////////////////////////////////////
189 //////////////////////////////////////////////////////////////////
190 //
191 //  class IOBufferData --
192 //         inline functions definitions
193 //
194 //////////////////////////////////////////////////////////////////
// Byte capacity of this data object, derived from its size index.
TS_INLINE int64_t
IOBufferData::block_size()
{
  return index_to_buffer_size(_size_index);
}
200 
// Wrap a caller-provided buffer @a b in a freshly thread-allocated
// IOBufferData; @a asize_index records the size class, and ownership of
// the memory depends on that index (see IOBufferData::dealloc).
TS_INLINE IOBufferData *
new_IOBufferData_internal(
#ifdef TRACK_BUFFER_USER
  const char *location,
#endif
  void *b, int64_t size, int64_t asize_index)
{
  (void)size; // only referenced by the assert below
  IOBufferData *d = THREAD_ALLOC(ioDataAllocator, this_thread());
  d->_size_index  = asize_index;
  ink_assert(BUFFER_SIZE_INDEX_IS_CONSTANT(asize_index) || size <= d->block_size());
#ifdef TRACK_BUFFER_USER
  d->_location = location;
#endif
  d->_data = (char *)b;
  return d;
}
218 
// Wrap heap (xmalloc) memory @a b of @a size bytes in an IOBufferData
// using the xmalloc size-index encoding.
TS_INLINE IOBufferData *
new_xmalloc_IOBufferData_internal(
#ifdef TRACK_BUFFER_USER
  const char *location,
#endif
  void *b, int64_t size)
{
  return new_IOBufferData_internal(
#ifdef TRACK_BUFFER_USER
    location,
#endif
    b, size, BUFFER_SIZE_INDEX_FOR_XMALLOC_SIZE(size));
}
232 
// Wrap buffer @a b, deriving the size index from @a size.
// (iobuffer_size_to_index is called with one argument here; its @a max
// default is presumably supplied by the declaration elsewhere — confirm.)
TS_INLINE IOBufferData *
new_IOBufferData_internal(
#ifdef TRACK_BUFFER_USER
  const char *location,
#endif
  void *b, int64_t size)
{
  return new_IOBufferData_internal(
#ifdef TRACK_BUFFER_USER
    location,
#endif
    b, size, iobuffer_size_to_index(size));
}
246 
// Thread-allocate an IOBufferData and allocate backing storage of the
// given size class with the given allocation strategy.
TS_INLINE IOBufferData *
new_IOBufferData_internal(
#ifdef TRACK_BUFFER_USER
  const char *loc,
#endif
  int64_t size_index, AllocType type)
{
  IOBufferData *d = THREAD_ALLOC(ioDataAllocator, this_thread());
#ifdef TRACK_BUFFER_USER
  d->_location = loc;
#endif
  d->alloc(size_index, type);
  return d;
}
261 
262 // IRIX has a compiler bug which prevents this function
263 // from being compiled correctly at -O3
264 // so it is DUPLICATED in IOBuffer.cc
265 // ****** IF YOU CHANGE THIS FUNCTION change that one as well.
TS_INLINE void
IOBufferData::alloc(int64_t size_index, AllocType type)
{
  // Allocate backing storage from the per-class fast allocator or the
  // heap (xmalloc classes), releasing any previously held buffer first.
  if (_data) {
    dealloc();
  }
  _size_index = size_index;
  _mem_type   = type;
#ifdef TRACK_BUFFER_USER
  iobuffer_mem_inc(_location, size_index);
#endif
  switch (type) {
  case MEMALIGNED:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(size_index)) {
      _data = (char *)ioBufAllocator[size_index].alloc_void();
      // coverity[dead_error_condition]
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(size_index)) {
      // Page-aligned allocation for the memaligned variant.
      _data = (char *)ats_memalign(ats_pagesize(), index_to_buffer_size(size_index));
    }
    break;
  default:
  case DEFAULT_ALLOC:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(size_index)) {
      _data = (char *)ioBufAllocator[size_index].alloc_void();
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(size_index)) {
      _data = (char *)ats_malloc(BUFFER_SIZE_FOR_XMALLOC(size_index));
    }
    break;
  }
}
296 
297 // ****** IF YOU CHANGE THIS FUNCTION change that one as well.
298 
TS_INLINE void
IOBufferData::dealloc()
{
  // Return the backing storage to wherever alloc() obtained it from and
  // reset this object to the unallocated state.
#ifdef TRACK_BUFFER_USER
  iobuffer_mem_dec(_location, _size_index);
#endif
  switch (_mem_type) {
  case MEMALIGNED:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
      ioBufAllocator[_size_index].free_void(_data);
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(_size_index)) {
      ::free((void *)_data);
    }
    break;
  default:
  case DEFAULT_ALLOC:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
      ioBufAllocator[_size_index].free_void(_data);
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(_size_index)) {
      ats_free(_data);
    }
    break;
  }
  _data       = nullptr;
  _size_index = BUFFER_SIZE_NOT_ALLOCATED;
  _mem_type   = NO_ALLOC;
}
326 
// Release the backing storage, then return this object itself to the
// thread-local allocator.
TS_INLINE void
IOBufferData::free()
{
  dealloc();
  THREAD_FREE(this, ioDataAllocator, this_thread());
}
333 
334 //////////////////////////////////////////////////////////////////
335 //
336 //  class IOBufferBlock --
337 //         inline functions definitions
338 //
339 //////////////////////////////////////////////////////////////////
// Thread-allocate an empty IOBufferBlock with no data attached.
TS_INLINE IOBufferBlock *
new_IOBufferBlock_internal(
#ifdef TRACK_BUFFER_USER
  const char *location
#endif
)
{
  IOBufferBlock *b = THREAD_ALLOC(ioBlockAllocator, this_thread());
#ifdef TRACK_BUFFER_USER
  b->_location = location;
#endif
  return b;
}
353 
// Thread-allocate an IOBufferBlock viewing @a len bytes of data @a d
// starting at @a offset.
TS_INLINE IOBufferBlock *
new_IOBufferBlock_internal(
#ifdef TRACK_BUFFER_USER
  const char *location,
#endif
  IOBufferData *d, int64_t len, int64_t offset)
{
  IOBufferBlock *b = THREAD_ALLOC(ioBlockAllocator, this_thread());
#ifdef TRACK_BUFFER_USER
  b->_location = location;
#endif
  b->set(d, len, offset);
  return b;
}
368 
369 TS_INLINE
370 IOBufferBlock::IOBufferBlock()
371 {
372   return;
373 }
374 
TS_INLINE void
IOBufferBlock::consume(int64_t len)
{
  // Advance the read pointer past @a len bytes; must not pass _end.
  _start += len;
  ink_assert(_start <= _end);
}
381 
TS_INLINE void
IOBufferBlock::fill(int64_t len)
{
  // Mark @a len more bytes as written (readable); must not pass _buf_end.
  _end += len;
  ink_assert(_end <= _buf_end);
}
388 
TS_INLINE void
IOBufferBlock::reset()
{
  // Empty the block: read/write pointers back to the start of the data
  // buffer, with capacity spanning the whole underlying block.
  _end = _start = buf();
  _buf_end      = buf() + data->block_size();
}
395 
TS_INLINE void
IOBufferBlock::alloc(int64_t i)
{
  // Attach freshly allocated data of size class @a i and reset pointers.
  ink_assert(BUFFER_SIZE_ALLOCATED(i));
#ifdef TRACK_BUFFER_USER
  data = new_IOBufferData_internal(_location, i);
#else
  data             = new_IOBufferData_internal(i);
#endif
  reset();
}
407 
TS_INLINE void
IOBufferBlock::clear()
{
  // Drop this block's data reference and walk the successor chain,
  // freeing each block for which we held the last refcount. Done
  // iteratively here to avoid deep recursive destruction.
  data = nullptr;

  IOBufferBlock *p = next.get();
  while (p) {
    // If our block pointer refcount dropped to zero,
    // recursively free the list.
    if (p->refcount_dec() == 0) {
      IOBufferBlock *n = p->next.detach();
      p->free();
      p = n;
    } else {
      // We don't hold the last refcount, so we are done.
      break;
    }
  }

  // Nuke the next pointer without dropping the refcount
  // because we already manually did that.
  next.detach();

  _buf_end = _end = _start = nullptr;
}
433 
TS_INLINE IOBufferBlock *
IOBufferBlock::clone() const
{
  // Shallow copy: the clone shares this block's IOBufferData and read
  // window. _buf_end is pinned at _end so no data can be appended through
  // the clone.
#ifdef TRACK_BUFFER_USER
  IOBufferBlock *b = new_IOBufferBlock_internal(_location);
#else
  IOBufferBlock *b = new_IOBufferBlock_internal();
#endif
  b->data     = data;
  b->_start   = _start;
  b->_end     = _end;
  b->_buf_end = _end;
#ifdef TRACK_BUFFER_USER
  b->_location = _location;
#endif
  return b;
}
451 
// Release the data and chain references held by this block.
TS_INLINE void
IOBufferBlock::dealloc()
{
  clear();
}
457 
// Release references, then return this block to the thread allocator.
TS_INLINE void
IOBufferBlock::free()
{
  dealloc();
  THREAD_FREE(this, ioBlockAllocator, this_thread());
}
464 
TS_INLINE void
IOBufferBlock::set_internal(void *b, int64_t len, int64_t asize_index)
{
  // Adopt caller-provided memory @a b as this block's storage and mark
  // @a len bytes as readable. The wrapping IOBufferData is created with
  // BUFFER_SIZE_NOT_ALLOCATED first, then retagged with @a asize_index,
  // which determines how (or whether) the memory is freed on dealloc.
#ifdef TRACK_BUFFER_USER
  data = new_IOBufferData_internal(_location, BUFFER_SIZE_NOT_ALLOCATED);
#else
  data             = new_IOBufferData_internal(BUFFER_SIZE_NOT_ALLOCATED);
#endif
  data->_data = (char *)b;
#ifdef TRACK_BUFFER_USER
  iobuffer_mem_inc(_location, asize_index);
#endif
  data->_size_index = asize_index;
  reset();
  _end = _start + len;
}
481 
TS_INLINE void
IOBufferBlock::set(IOBufferData *d, int64_t len, int64_t offset)
{
  // Attach data @a d with a read window of @a len bytes starting at
  // @a offset into the data buffer.
  data     = d;
  _start   = buf() + offset;
  _end     = _start + len;
  _buf_end = buf() + d->block_size();
}
490 
491 TS_INLINE void
492 IOBufferBlock::realloc_set_internal(void *b, int64_t buf_size, int64_t asize_index)
493 {
494   int64_t data_size = size();
495   memcpy(b, _start, size());
496   dealloc();
497   set_internal(b, buf_size, asize_index);
498   _end = _start + data_size;
499 }
500 
// Move this block's contents into the caller-provided buffer @a b (tagged
// not-allocated, so the block will not free it).
TS_INLINE void
IOBufferBlock::realloc(void *b, int64_t buf_size)
{
  realloc_set_internal(b, buf_size, BUFFER_SIZE_NOT_ALLOCATED);
}
506 
TS_INLINE void
IOBufferBlock::realloc(int64_t i)
{
  // Grow this block to the larger fast-allocator size class @a i; no-op
  // when already that class or @a i is beyond the allocator table.
  if ((i == data->_size_index) || (i >= (int64_t)countof(ioBufAllocator))) {
    return;
  }

  // Only growing to a real class is supported.
  ink_release_assert(i > data->_size_index && i != BUFFER_SIZE_NOT_ALLOCATED);
  void *b = ioBufAllocator[i].alloc_void();
  realloc_set_internal(b, BUFFER_SIZE_FOR_INDEX(i), i);
}
518 
519 //////////////////////////////////////////////////////////////////
520 //
521 //  class IOBufferReader --
522 //         inline functions definitions
523 //
524 //////////////////////////////////////////////////////////////////
TS_INLINE void
IOBufferReader::skip_empty_blocks()
{
  // Advance past leading blocks already fully consumed by start_offset,
  // folding each skipped block's size into the offset.
  while (block->next && block->next->read_avail() && start_offset >= block->size()) {
    start_offset -= block->size();
    block = block->next;
  }
}
533 
// Forwards to the underlying MIOBuffer's low_water().
TS_INLINE bool
IOBufferReader::low_water()
{
  return mbuf->low_water();
}
539 
// True when at least water_mark bytes are available to read.
TS_INLINE bool
IOBufferReader::high_water()
{
  return read_avail() >= mbuf->water_mark;
}
545 
// Forwards to the underlying MIOBuffer's current_low_water().
TS_INLINE bool
IOBufferReader::current_low_water()
{
  return mbuf->current_low_water();
}
551 
// Raw pointer to the reader's current block (no empty-block skipping).
TS_INLINE IOBufferBlock *
IOBufferReader::get_current_block()
{
  return block.get();
}
557 
TS_INLINE char *
IOBufferReader::start()
{
  // Pointer to the first unread byte, or nullptr when no block is
  // attached. Skips leading consumed blocks first.
  if (!block) {
    return nullptr;
  }

  skip_empty_blocks();
  return block->start() + start_offset;
}
568 
TS_INLINE char *
IOBufferReader::end()
{
  // End pointer of the current block's readable data, or nullptr when no
  // block is attached.
  if (!block) {
    return nullptr;
  }

  skip_empty_blocks();
  return block->end();
}
579 
TS_INLINE int64_t
IOBufferReader::block_read_avail()
{
  // Unread bytes available in the current block only (not the chain).
  if (!block) {
    return 0;
  }

  skip_empty_blocks();
  return (int64_t)(block->end() - (block->start() + start_offset));
}
590 
// std::string_view over the unread bytes of the current block; an empty
// view when there is no data.
inline std::string_view
IOBufferReader::block_read_view()
{
  const char *start = this->start(); // empty blocks are skipped in here.
  return start ? std::string_view{start, static_cast<size_t>(block->end() - start)} : std::string_view{};
}
597 
598 TS_INLINE int
599 IOBufferReader::block_count()
600 {
601   int count        = 0;
602   IOBufferBlock *b = block.get();
603 
604   while (b) {
605     count++;
606     b = b->next.get();
607   }
608 
609   return count;
610 }
611 
TS_INLINE int64_t
IOBufferReader::read_avail()
{
  // Total unread bytes: sum of the chain's read_avail() minus the
  // already-consumed start_offset, capped by size_limit when one is set.
  int64_t t        = 0;
  IOBufferBlock *b = block.get();

  while (b) {
    t += b->read_avail();
    b = b->next.get();
  }

  t -= start_offset;
  if (size_limit != INT64_MAX && t > size_limit) {
    t = size_limit;
  }

  return t;
}
630 
631 TS_INLINE bool
632 IOBufferReader::is_read_avail_more_than(int64_t size)
633 {
634   int64_t t        = -start_offset;
635   IOBufferBlock *b = block.get();
636 
637   while (b) {
638     t += b->read_avail();
639     if (t > size) {
640       return true;
641     }
642     b = b->next.get();
643   }
644   return false;
645 }
646 
TS_INLINE void
IOBufferReader::consume(int64_t n)
{
  // Mark @a n bytes as read: advance start_offset, charge size_limit,
  // then slide the current-block pointer past fully consumed blocks.
  start_offset += n;
  if (size_limit != INT64_MAX) {
    size_limit -= n;
  }

  ink_assert(size_limit >= 0);
  if (!block) {
    return;
  }

  int64_t r = block->read_avail();
  int64_t s = start_offset;
  while (r <= s && block->next && block->next->read_avail()) {
    s -= r;
    start_offset = s;
    block        = block->next;
    r            = block->read_avail();
  }
}
669 
// Random access to the i-th unread byte, walking the block chain.
// Fatally asserts when @a i is beyond the available data.
TS_INLINE char &IOBufferReader::operator[](int64_t i)
{
  static char default_ret = '\0'; // This is just to avoid compiler warnings...
  IOBufferBlock *b        = block.get();

  i += start_offset;
  while (b) {
    int64_t bytes = b->read_avail();
    if (bytes > i) {
      return b->start()[i];
    }
    i -= bytes;
    b = b->next.get();
  }

  ink_release_assert(!"out of range");
  // Never used, just to satisfy compilers not understanding the fatality of ink_release_assert().
  return default_ret;
}
689 
690 TS_INLINE void
691 IOBufferReader::clear()
692 {
693   accessor     = nullptr;
694   block        = nullptr;
695   mbuf         = nullptr;
696   start_offset = 0;
697   size_limit   = INT64_MAX;
698 }
699 
TS_INLINE void
IOBufferReader::reset()
{
  // Re-point the reader at the buffer's writer block and clear the
  // consumption state; mbuf must already be set.
  block        = mbuf->_writer;
  start_offset = 0;
  size_limit   = INT64_MAX;
}
707 
708 ////////////////////////////////////////////////////////////////
709 //
710 //  class MIOBuffer --
711 //      inline functions definitions
712 //
713 ////////////////////////////////////////////////////////////////
714 inkcoreapi extern ClassAllocator<MIOBuffer> ioAllocator;
715 ////////////////////////////////////////////////////////////////
716 //
717 //  MIOBuffer::MIOBuffer()
718 //
//  This constructor accepts a pre-allocated memory buffer,
//  wraps it in IOBufferData and IOBufferBlock structures
//  and sets it as the current block.
//  NOTE that in this case the memory buffer will not be freed
//  by the MIOBuffer class. It is the user's responsibility to
//  free the memory buffer. The wrappers (IOBufferBlock and
//  IOBufferData) will be freed by this class.
726 //
727 ////////////////////////////////////////////////////////////////
TS_INLINE
MIOBuffer::MIOBuffer(void *b, int64_t bufsize, int64_t aWater_mark)
{
  // Wrap a caller-owned memory buffer; see the banner comment above —
  // the memory itself is not freed by MIOBuffer.
#ifdef TRACK_BUFFER_USER
  _location = nullptr;
#endif
  set(b, bufsize);
  water_mark = aWater_mark;
  size_index = BUFFER_SIZE_NOT_ALLOCATED;
  return;
}
739 
TS_INLINE
MIOBuffer::MIOBuffer(int64_t default_size_index)
{
  // Start empty; blocks allocated later use @a default_size_index as
  // their size class.
  clear();
  size_index = default_size_index;
#ifdef TRACK_BUFFER_USER
  _location = nullptr;
#endif
  return;
}
750 
TS_INLINE
MIOBuffer::MIOBuffer()
{
  // Default construction: reset all state via clear().
  clear();
#ifdef TRACK_BUFFER_USER
  _location = nullptr;
#endif
  return;
}
760 
TS_INLINE
MIOBuffer::~MIOBuffer()
{
  // Drop the writer chain reference and release every reader slot.
  _writer = nullptr;
  dealloc_all_readers();
}
767 
// Thread-allocate an MIOBuffer and give it a first block of the given
// size class, with the water mark zeroed.
TS_INLINE MIOBuffer *
new_MIOBuffer_internal(
#ifdef TRACK_BUFFER_USER
  const char *location,
#endif
  int64_t size_index)
{
  MIOBuffer *b = THREAD_ALLOC(ioAllocator, this_thread());
#ifdef TRACK_BUFFER_USER
  b->_location = location;
#endif
  b->alloc(size_index);
  b->water_mark = 0;
  return b;
}
783 
// Release an MIOBuffer's writer chain and readers, then return the object
// to the thread allocator.
TS_INLINE void
free_MIOBuffer(MIOBuffer *mio)
{
  mio->_writer = nullptr;
  mio->dealloc_all_readers();
  THREAD_FREE(mio, ioAllocator, this_thread());
}
791 
// Thread-allocate an MIOBuffer with no initial block; @a size_index only
// records the class to use for future block allocations.
TS_INLINE MIOBuffer *
new_empty_MIOBuffer_internal(
#ifdef TRACK_BUFFER_USER
  const char *location,
#endif
  int64_t size_index)
{
  MIOBuffer *b  = THREAD_ALLOC(ioAllocator, this_thread());
  b->size_index = size_index;
  b->water_mark = 0;
#ifdef TRACK_BUFFER_USER
  b->_location = location;
#endif
  return b;
}
807 
// Return the MIOBuffer object itself to the thread allocator without
// touching blocks or readers (counterpart to new_empty_MIOBuffer_internal).
TS_INLINE void
free_empty_MIOBuffer(MIOBuffer *mio)
{
  THREAD_FREE(mio, ioAllocator, this_thread());
}
813 
814 TS_INLINE IOBufferReader *
815 MIOBuffer::alloc_accessor(MIOBufferAccessor *anAccessor)
816 {
817   int i;
818   for (i = 0; i < MAX_MIOBUFFER_READERS; i++) {
819     if (!readers[i].allocated()) {
820       break;
821     }
822   }
823 
824   // TODO refactor code to return nullptr at some point
825   ink_release_assert(i < MAX_MIOBUFFER_READERS);
826 
827   IOBufferReader *e = &readers[i];
828   e->mbuf           = this;
829   e->reset();
830   e->accessor = anAccessor;
831 
832   return e;
833 }
834 
835 TS_INLINE IOBufferReader *
836 MIOBuffer::alloc_reader()
837 {
838   int i;
839   for (i = 0; i < MAX_MIOBUFFER_READERS; i++) {
840     if (!readers[i].allocated()) {
841       break;
842     }
843   }
844 
845   // TODO refactor code to return nullptr at some point
846   ink_release_assert(i < MAX_MIOBUFFER_READERS);
847 
848   IOBufferReader *e = &readers[i];
849   e->mbuf           = this;
850   e->reset();
851   e->accessor = nullptr;
852 
853   return e;
854 }
855 
// Byte size of the blocks this buffer allocates, from its size index.
TS_INLINE int64_t
MIOBuffer::block_size()
{
  return index_to_buffer_size(size_index);
}
// Allocate a free reader slot and copy @a r's read position into it so
// both readers independently consume the same data.
TS_INLINE IOBufferReader *
MIOBuffer::clone_reader(IOBufferReader *r)
{
  int i;
  for (i = 0; i < MAX_MIOBUFFER_READERS; i++) {
    if (!readers[i].allocated()) {
      break;
    }
  }

  // TODO refactor code to return nullptr at some point
  ink_release_assert(i < MAX_MIOBUFFER_READERS);

  IOBufferReader *e = &readers[i];
  e->mbuf           = this;
  e->accessor       = nullptr;
  e->block          = r->block;
  e->start_offset   = r->start_offset;
  e->size_limit     = r->size_limit;
  ink_assert(e->size_limit >= 0);

  return e;
}
884 
TS_INLINE int64_t
MIOBuffer::block_write_avail()
{
  // Free space in the first writable block only; 0 when there is none.
  IOBufferBlock *b = first_write_block();
  return b ? b->write_avail() : 0;
}
891 
892 ////////////////////////////////////////////////////////////////
893 //
894 //  MIOBuffer::append_block()
895 //
896 //  Appends a block to writer->next and make it the current
897 //  block.
898 //  Note that the block is not appended to the end of the list.
899 //  That means that if writer->next was not null before this
900 //  call then the block that writer->next was pointing to will
901 //  have its reference count decremented and writer->next
902 //  will have a new value which is the new block.
903 //  In any case the new appended block becomes the current
904 //  block.
905 //
906 ////////////////////////////////////////////////////////////////
TS_INLINE void
MIOBuffer::append_block_internal(IOBufferBlock *b)
{
  // Attach @a b at _writer->next (the current tail must be empty) and
  // advance _writer to the last block of @a b's chain holding data, then
  // past any full blocks so writing can continue.
  // It would be nice to remove an empty buffer at the beginning,
  // but this breaks HTTP.
  // if (!_writer || !_writer->read_avail())
  if (!_writer) {
    _writer = b;
    init_readers();
  } else {
    ink_assert(!_writer->next || !_writer->next->read_avail());
    _writer->next = b;
    while (b->read_avail()) {
      _writer = b;
      b       = b->next.get();
      if (!b) {
        break;
      }
    }
  }
  while (_writer->next && !_writer->write_avail() && _writer->next->read_avail()) {
    _writer = _writer->next;
  }
}
931 
TS_INLINE void
MIOBuffer::append_block(IOBufferBlock *b)
{
  // Public wrapper: the appended block must already contain data.
  ink_assert(b->read_avail());
  append_block_internal(b);
}
938 
939 ////////////////////////////////////////////////////////////////
940 //
941 //  MIOBuffer::append_block()
942 //
943 //  Allocate a block, appends it to current->next
944 //  and make the new block the current block (writer).
945 //
946 ////////////////////////////////////////////////////////////////
TS_INLINE void
MIOBuffer::append_block(int64_t asize_index)
{
  // Allocate a fresh block of size class @a asize_index and append it
  // (see the banner comment above).
  ink_assert(BUFFER_SIZE_ALLOCATED(asize_index));
#ifdef TRACK_BUFFER_USER
  IOBufferBlock *b = new_IOBufferBlock_internal(_location);
#else
  IOBufferBlock *b = new_IOBufferBlock_internal();
#endif
  b->alloc(asize_index);
  append_block_internal(b);
  return;
}
960 
// Append a block of this buffer's default size class.
TS_INLINE void
MIOBuffer::add_block()
{
  append_block(size_index);
}
966 
TS_INLINE void
MIOBuffer::check_add_block()
{
  // Grow only when below the water mark and current write space is low.
  if (!high_water() && current_low_water()) {
    add_block();
  }
}
974 
// The block currently being written into.
TS_INLINE IOBufferBlock *
MIOBuffer::get_current_block()
{
  return first_write_block();
}
980 
981 //////////////////////////////////////////////////////////////////
982 //
983 //  MIOBuffer::current_write_avail()
984 //
985 //  returns the total space available in all blocks.
986 //  This function is different than write_avail() because
987 //  it will not append a new block if there is no space
988 //  or below the watermark space available.
989 //
990 //////////////////////////////////////////////////////////////////
991 TS_INLINE int64_t
992 MIOBuffer::current_write_avail()
993 {
994   int64_t t        = 0;
995   IOBufferBlock *b = _writer.get();
996   while (b) {
997     t += b->write_avail();
998     b = b->next.get();
999   }
1000   return t;
1001 }
1002 
1003 //////////////////////////////////////////////////////////////////
1004 //
1005 //  MIOBuffer::write_avail()
1006 //
1007 //  returns the number of bytes available in the current block.
1008 //  If there is no current block or not enough free space in
1009 //  the current block then a new block is appended.
1010 //
1011 //////////////////////////////////////////////////////////////////
TS_INLINE int64_t
MIOBuffer::write_avail()
{
  // Possibly grow the chain first, then report total writable space.
  check_add_block();
  return current_write_avail();
}
1018 
TS_INLINE void
MIOBuffer::fill(int64_t len)
{
  // Advance the write cursor by @a len bytes, spilling across block
  // boundaries. Assumes sufficient space exists in the chain (e.g. from a
  // prior write_avail()); otherwise _writer->next may be null here.
  int64_t f = _writer->write_avail();
  while (f < len) {
    _writer->fill(f);
    len -= f;
    if (len > 0) {
      _writer = _writer->next;
    }
    f = _writer->write_avail();
  }
  _writer->fill(len);
}
1033 
1034 TS_INLINE int
1035 MIOBuffer::max_block_count()
1036 {
1037   int maxb = 0;
1038   for (auto &reader : readers) {
1039     if (reader.allocated()) {
1040       int c = reader.block_count();
1041       if (c > maxb) {
1042         maxb = c;
1043       }
1044     }
1045   }
1046   return maxb;
1047 }
1048 
1049 TS_INLINE int64_t
1050 MIOBuffer::max_read_avail()
1051 {
1052   int64_t s = 0;
1053   int found = 0;
1054   for (auto &reader : readers) {
1055     if (reader.allocated()) {
1056       int64_t ss = reader.read_avail();
1057       if (ss > s) {
1058         s = ss;
1059       }
1060       found = 1;
1061     }
1062   }
1063   if (!found && _writer) {
1064     return _writer->read_avail();
1065   }
1066   return s;
1067 }
1068 
TS_INLINE void
MIOBuffer::set(void *b, int64_t len)
{
  // Make the caller-owned buffer @a b the writer block (constant size
  // class, so the memory is not freed by us) and rebind all readers.
#ifdef TRACK_BUFFER_USER
  _writer = new_IOBufferBlock_internal(_location);
#else
  _writer          = new_IOBufferBlock_internal();
#endif
  _writer->set_internal(b, len, BUFFER_SIZE_INDEX_FOR_CONSTANT_SIZE(len));
  init_readers();
}
1080 
TS_INLINE void
MIOBuffer::append_xmalloced(void *b, int64_t len)
{
  // Append a block wrapping heap memory @a b of @a len bytes; the xmalloc
  // size index means it is freed via ats_free on dealloc.
#ifdef TRACK_BUFFER_USER
  IOBufferBlock *x = new_IOBufferBlock_internal(_location);
#else
  IOBufferBlock *x = new_IOBufferBlock_internal();
#endif
  x->set_internal(b, len, BUFFER_SIZE_INDEX_FOR_XMALLOC_SIZE(len));
  append_block_internal(x);
}
1092 
TS_INLINE void
MIOBuffer::append_fast_allocated(void *b, int64_t len, int64_t fast_size_index)
{
  // Append a block wrapping memory @a b obtained from the fast allocator
  // of class @a fast_size_index; returned to that allocator on dealloc.
#ifdef TRACK_BUFFER_USER
  IOBufferBlock *x = new_IOBufferBlock_internal(_location);
#else
  IOBufferBlock *x = new_IOBufferBlock_internal();
#endif
  x->set_internal(b, len, fast_size_index);
  append_block_internal(x);
}
1104 
TS_INLINE void
MIOBuffer::alloc(int64_t i)
{
  // Create the first writer block with size class @a i, remember @a i as
  // the default for future blocks, and rebind all readers.
#ifdef TRACK_BUFFER_USER
  _writer = new_IOBufferBlock_internal(_location);
#else
  _writer          = new_IOBufferBlock_internal();
#endif
  _writer->alloc(i);
  size_index = i;
  init_readers();
}
1117 
TS_INLINE void
MIOBuffer::dealloc_reader(IOBufferReader *e)
{
  // Free the reader slot @a e, first detaching any accessor bound to it.
  if (e->accessor) {
    ink_assert(e->accessor->writer() == this);
    ink_assert(e->accessor->reader() == e);
    e->accessor->clear();
  }
  e->clear();
}
1128 
// Duplicate this reader (same read position) within its buffer.
TS_INLINE IOBufferReader *
IOBufferReader::clone()
{
  return mbuf->clone_reader(this);
}
1134 
// Release this reader back to its buffer's reader table.
TS_INLINE void
IOBufferReader::dealloc()
{
  mbuf->dealloc_reader(this);
}
1140 
1141 TS_INLINE void
1142 MIOBuffer::dealloc_all_readers()
1143 {
1144   for (auto &reader : readers) {
1145     if (reader.allocated()) {
1146       dealloc_reader(&reader);
1147     }
1148   }
1149 }
1150 
// Set the default block size class from a byte count.
TS_INLINE void
MIOBuffer::set_size_index(int64_t size)
{
  size_index = iobuffer_size_to_index(size);
}
1156 
1157 TS_INLINE void
1158 MIOBufferAccessor::reader_for(MIOBuffer *abuf)
1159 {
1160   mbuf = abuf;
1161   if (abuf) {
1162     entry = mbuf->alloc_accessor(this);
1163   } else {
1164     entry = nullptr;
1165   }
1166 }
1167 
TS_INLINE void
MIOBufferAccessor::reader_for(IOBufferReader *areader)
{
  // Point this accessor at an existing reader (no-op if already bound).
  if (entry == areader) {
    return;
  }
  mbuf  = areader->mbuf;
  entry = areader;
  ink_assert(mbuf);
}
1178 
TS_INLINE void
MIOBufferAccessor::writer_for(MIOBuffer *abuf)
{
  // Bind this accessor to @a abuf for writing only; no reader attached.
  mbuf  = abuf;
  entry = nullptr;
}
1185 
// Trivial destructor: the accessor does not own its buffer or reader.
TS_INLINE
MIOBufferAccessor::~MIOBufferAccessor() {}
1188