/** @file

  Pooling and reuse of HTTP/1 server (origin) sessions.

  @section license License

  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements.  See the NOTICE file
  distributed with this work for additional information
  regarding copyright ownership.  The ASF licenses this file
  to you under the Apache License, Version 2.0 (the
  "License"); you may not use this file except in compliance
  with the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
 */

/****************************************************************************

   HttpSessionManager.cc

   Description:

      Manages the per-thread and global pools of shared HTTP/1 server
      sessions so that keep-alive connections to origin servers can be
      reused across transactions.

 ****************************************************************************/

#include "HttpSessionManager.h"
#include "../ProxySession.h"
#include "Http1ServerSession.h"
#include "HttpSM.h"
#include "HttpDebugNames.h"

// Initialize a thread to handle HTTP session management
void
initialize_thread_for_http_sessions(EThread *thread)
{
  thread->server_session_pool = new ServerSessionPool;
}

HttpSessionManager httpSessionManager;

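// A pool runs on its own mutex and indexes the same sessions two ways: by origin IP address
// (m_ip_pool) and by hostname hash (m_fqdn_pool), so acquireSession() can match on either
// criterion. Table expansion is managed manually rather than automatically.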
ServerSessionPool::ServerSessionPool() : Continuation(new_ProxyMutex()), m_ip_pool(1023), m_fqdn_pool(1023)
{
  SET_HANDLER(&ServerSessionPool::eventHandler);
  m_ip_pool.set_expansion_policy(IPTable::MANUAL);
  m_fqdn_pool.set_expansion_policy(FQDNTable::MANUAL);
}

void
ServerSessionPool::purge()
{
  // @c do_io_close can free the instance, which clears the intrusive links and would invalidate
  // iteration, so the sessions are closed via @c apply before both tables are cleared.
  m_ip_pool.apply([](Http1ServerSession *ssn) -> void { ssn->do_io_close(); });
  m_ip_pool.clear();
  m_fqdn_pool.clear();
}

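// Decide whether idle session @a ss can be reused for a request to @a addr with hostname hash
// @a hostname_hash under @a match_style: NONE never matches, IP checks only the address and port,
// HOST checks only the port and hostname hash, and any stricter style requires all of them to agree.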
bool
ServerSessionPool::match(Http1ServerSession *ss, sockaddr const *addr, CryptoHash const &hostname_hash,
                         TSServerSessionSharingMatchType match_style)
{
  return TS_SERVER_SESSION_SHARING_MATCH_NONE !=
           match_style && // if no matching allowed, fail immediately.
                          // The hostname matches if we're not checking it or it (and the port!) is a match.
         (TS_SERVER_SESSION_SHARING_MATCH_IP == match_style ||
          (ats_ip_port_cast(addr) == ats_ip_port_cast(ss->get_server_ip()) && ss->hostname_hash == hostname_hash)) &&
         // The IP address matches if we're not checking it or it is a match.
         (TS_SERVER_SESSION_SHARING_MATCH_HOST == match_style || ats_ip_addr_port_eq(ss->get_server_ip(), addr));
}

bool
ServerSessionPool::validate_sni(HttpSM *sm, NetVConnection *netvc)
{
  // TS-4468: If the connection matches, make sure the SNI server
  // name (if present) matches the request hostname
  int len              = 0;
  const char *req_host = sm->t_state.hdr_info.server_request.host_get(&len);
  // The sni_servername of the connection was set in HttpSM::do_http_server_open
  // by fetching the hostname from the server request, so the connection should only
  // be reused if the hostname in the new request is the same as the hostname in the
  // original request.
  const char *session_sni = netvc->options.sni_servername;

  return ((sm->t_state.scheme != URL_WKSIDX_HTTPS) || !session_sni || strncasecmp(session_sni, req_host, len) == 0);
}

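// Look for a pooled session that can be reused for @a addr / @a hostname_hash under @a match_style.
// On success the session is removed from both tables, handed back through @a to_return, and
// HSM_DONE is returned; otherwise @a to_return is left as nullptr and HSM_NOT_FOUND is returned.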
HSMresult_t
ServerSessionPool::acquireSession(sockaddr const *addr, CryptoHash const &hostname_hash,
                                  TSServerSessionSharingMatchType match_style, HttpSM *sm, Http1ServerSession *&to_return)
{
  HSMresult_t zret = HSM_NOT_FOUND;
  to_return        = nullptr;

  if (TS_SERVER_SESSION_SHARING_MATCH_HOST == match_style) {
    // This is broken out because only in this case do we check the host hash first. The range must be checked
    // to verify an upstream that matches port and SNI name is selected. Walk backwards to select oldest.
    in_port_t port = ats_ip_port_cast(addr);
    FQDNTable::iterator first, last;
    // FreeBSD/clang++ bug workaround: explicit cast to super type to make overload work. Not needed on Fedora 27 nor gcc.
    // Not fixed on FreeBSD as of llvm 6.0.1.
    std::tie(first, last) = static_cast<const decltype(m_fqdn_pool)::range::super_type &>(m_fqdn_pool.equal_range(hostname_hash));
    while (last != first) {
      --last;
      if (port == ats_ip_port_cast(last->get_server_ip()) && validate_sni(sm, last->get_netvc())) {
        zret = HSM_DONE;
        break;
      }
    }
    if (zret == HSM_DONE) {
      to_return = last;
      m_fqdn_pool.erase(last);
      m_ip_pool.erase(to_return);
    }
  } else if (TS_SERVER_SESSION_SHARING_MATCH_NONE != match_style) { // matching is not disabled.
    IPTable::iterator first, last;
    // FreeBSD/clang++ bug workaround: explicit cast to super type to make overload work. Not needed on Fedora 27 nor gcc.
    // Not fixed on FreeBSD as of llvm 6.0.1.
    std::tie(first, last) = static_cast<const decltype(m_ip_pool)::range::super_type &>(m_ip_pool.equal_range(addr));
    // The range is all that is needed in the match IP case, otherwise need to scan for matching fqdn.
    // Note the port is matched as part of the address key so it doesn't need to be checked again.
    if (TS_SERVER_SESSION_SHARING_MATCH_IP != match_style) {
      while (last != first) {
        --last;
        if (last->hostname_hash == hostname_hash && validate_sni(sm, last->get_netvc())) {
          zret = HSM_DONE;
          break;
        }
      }
    } else if (last != first) {
      --last;
      zret = HSM_DONE;
    }
    if (zret == HSM_DONE) {
      to_return = last;
      m_ip_pool.erase(last);
      m_fqdn_pool.erase(to_return);
    }
  }
  return zret;
}

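// Return @a ss to the shared pool. The pool takes over read and write I/O on the connection so
// that a close or timeout from the origin can be detected (and handled in eventHandler) while
// the session sits idle.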
void
ServerSessionPool::releaseSession(Http1ServerSession *ss)
{
  ss->state = HSS_KA_SHARED;
  // Now we need to issue a read on the connection to detect
  //  if it closes on us.  We will get called back in the
  //  continuation for this bucket, ensuring we have the lock
  //  to remove the connection from our lists
  ss->do_io_read(this, INT64_MAX, ss->read_buffer);

  // Transfer control of the write side as well
  ss->do_io_write(this, 0, nullptr);

  // we probably don't need the active timeout set, but will leave it for now
  ss->get_netvc()->set_inactivity_timeout(ss->get_netvc()->get_inactivity_timeout());
  ss->get_netvc()->set_active_timeout(ss->get_netvc()->get_active_timeout());
  // put it in the pools.
  m_ip_pool.insert(ss);
  m_fqdn_pool.insert(ss);

  Debug("http_ss",
        "[%" PRId64 "] [release session] "
        "session placed into shared pool",
        ss->con_id);
}

//   Called from the NetProcessor to let us know that a
//    connection has closed down
//
int
ServerSessionPool::eventHandler(int event, void *data)
{
  NetVConnection *net_vc = nullptr;
  Http1ServerSession *s  = nullptr;

  switch (event) {
  case VC_EVENT_READ_READY:
  // The server sent us data.  This is unexpected so
  //   close the connection
  /* Fall through */
  case VC_EVENT_EOS:
  case VC_EVENT_ERROR:
  case VC_EVENT_INACTIVITY_TIMEOUT:
  case VC_EVENT_ACTIVE_TIMEOUT:
    net_vc = static_cast<NetVConnection *>((static_cast<VIO *>(data))->vc_server);
    break;

  default:
    ink_release_assert(0);
    return 0;
  }

  sockaddr const *addr                 = net_vc->get_remote_addr();
  HttpConfigParams *http_config_params = HttpConfig::acquire();
  bool found                           = false;

  for (auto spot = m_ip_pool.find(addr); spot != m_ip_pool.end() && spot->_ip_link.equal(addr, spot); ++spot) {
    if ((s = spot)->get_netvc() == net_vc) {
      // if there was a timeout of some kind on a keep alive connection, and
      // keeping the connection alive will not keep us above the # of max connections
      // to the origin and we are below the min number of keep alive connections to this
      // origin, then reset the timeouts on our end and do not close the connection
      if ((event == VC_EVENT_INACTIVITY_TIMEOUT || event == VC_EVENT_ACTIVE_TIMEOUT) && s->state == HSS_KA_SHARED &&
          s->conn_track_group) {
        Debug("http_ss", "s->conn_track_group->min_keep_alive_conns : %d", s->conn_track_group->min_keep_alive_conns);
        bool connection_count_below_min = s->conn_track_group->_count <= s->conn_track_group->min_keep_alive_conns;

        if (connection_count_below_min) {
          Debug("http_ss",
                "[%" PRId64 "] [session_bucket] session received io notice [%s], "
                "resetting timeout to maintain minimum number of connections",
                s->con_id, HttpDebugNames::get_event_name(event));
          s->get_netvc()->set_inactivity_timeout(s->get_netvc()->get_inactivity_timeout());
          s->get_netvc()->set_active_timeout(s->get_netvc()->get_active_timeout());
          found = true;
          break;
        }
      }

      // We've found our server session. Remove it from
      //   our lists and close it down
      Debug("http_ss", "[%" PRId64 "] [session_pool] session %p received io notice [%s]", s->con_id, s,
            HttpDebugNames::get_event_name(event));
      ink_assert(s->state == HSS_KA_SHARED);
      // Out of the pool! Now!
      m_ip_pool.erase(spot);
      m_fqdn_pool.erase(s);
      // Drop connection on this end.
      s->do_io_close();
      found = true;
      break;
    }
  }

  HttpConfig::release(http_config_params);
  if (!found) {
    // We failed to find our session.  This can only be the result of a programming flaw. Since we only ever keep
    // UnixNetVConnections and SSLNetVConnections in the session pool, the dynamic cast won't fail.
    UnixNetVConnection *unix_net_vc = dynamic_cast<UnixNetVConnection *>(net_vc);
    if (unix_net_vc) {
      char peer_ip[INET6_ADDRPORTSTRLEN];
      ats_ip_nptop(unix_net_vc->get_remote_addr(), peer_ip, sizeof(peer_ip));

      Warning("Connection leak from http keep-alive system fd=%d closed=%d peer_ip_port=%s", unix_net_vc->con.fd,
              unix_net_vc->closed, peer_ip);
    }
    ink_assert(0);
  }
  return 0;
}

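// Create the global session pool and arrange for each new ET_NET thread to build its own
// per-thread pool via initialize_thread_for_http_sessions().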
void
HttpSessionManager::init()
{
  m_g_pool = new ServerSessionPool;
  eventProcessor.schedule_spawn(&initialize_thread_for_http_sessions, ET_NET);
}

// TODO: Should this really purge all keep-alive sessions?
// Does this make any sense, since we always do the global pool and not the per thread?
void
HttpSessionManager::purge_keepalives()
{
  EThread *ethread = this_ethread();

  MUTEX_TRY_LOCK(lock, m_g_pool->mutex, ethread);
  if (lock.is_locked()) {
    m_g_pool->purge();
  } // should we do something clever if we don't get the lock?
}

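// Find a server session for the transaction: first try the session already bound to the
// user agent session, then the per-thread or global shared pool depending on configuration.
// Returns HSM_DONE if a session was attached, HSM_RETRY if the pool lock could not be taken,
// and HSM_NOT_FOUND otherwise.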
HSMresult_t
HttpSessionManager::acquire_session(Continuation * /* cont ATS_UNUSED */, sockaddr const *ip, const char *hostname,
                                    ProxyTransaction *ua_txn, HttpSM *sm)
{
  Http1ServerSession *to_return = nullptr;
  TSServerSessionSharingMatchType match_style =
    static_cast<TSServerSessionSharingMatchType>(sm->t_state.txn_conf->server_session_sharing_match);
  CryptoHash hostname_hash;
  HSMresult_t retval = HSM_NOT_FOUND;

  CryptoContext().hash_immediate(hostname_hash, (unsigned char *)hostname, strlen(hostname));

  // First check to see if there is a server session bound
  //   to the user agent session
  to_return = ua_txn->get_server_session();
  if (to_return != nullptr) {
    ua_txn->attach_server_session(nullptr);

    // Since the client session is reusing the same server session, it seems that the SNI should match.
    // Will the client make requests to different hosts over the same SSL session? Though checking
    // the IP/hostname here seems a bit redundant too
    //
    if (ServerSessionPool::match(to_return, ip, hostname_hash, match_style) &&
        ServerSessionPool::validate_sni(sm, to_return->get_netvc())) {
      Debug("http_ss", "[%" PRId64 "] [acquire session] returning attached session ", to_return->con_id);
      to_return->state = HSS_ACTIVE;
      sm->attach_server_session(to_return);
      return HSM_DONE;
    }
    // Release this session back to the main session pool and
    //   then continue looking for one from the shared pool
    Debug("http_ss",
          "[%" PRId64 "] [acquire session] "
          "session not a match, returning to shared pool",
          to_return->con_id);
    to_return->release();
    to_return = nullptr;
  }

  // TS-3797 Adding another scope so the pool lock is dropped after it is removed from the pool and
  // potentially moved to the current thread.  At the end of this scope, either the original
  // pool selected VC is on the current thread or its content has been moved to a new VC on the
  // current thread and the original has been deleted. This should adequately cover TS-3266 so we
  // don't have to continue to hold the pool thread while we initialize the server session in the
  // client session
  {
    // Now check to see if we have a connection in our shared connection pool
    EThread *ethread = this_ethread();
    Ptr<ProxyMutex> pool_mutex =
      (TS_SERVER_SESSION_SHARING_POOL_THREAD == sm->t_state.http_config_param->server_session_sharing_pool) ?
        ethread->server_session_pool->mutex :
        m_g_pool->mutex;
    MUTEX_TRY_LOCK(lock, pool_mutex, ethread);
    if (lock.is_locked()) {
      if (TS_SERVER_SESSION_SHARING_POOL_THREAD == sm->t_state.http_config_param->server_session_sharing_pool) {
        retval = ethread->server_session_pool->acquireSession(ip, hostname_hash, match_style, sm, to_return);
        Debug("http_ss", "[acquire session] thread pool search %s", to_return ? "successful" : "failed");
      } else {
        retval = m_g_pool->acquireSession(ip, hostname_hash, match_style, sm, to_return);
        Debug("http_ss", "[acquire session] global pool search %s", to_return ? "successful" : "failed");
        // At this point to_return has been removed from the pool. Do we need to move it
        // to the same thread?
        if (to_return) {
          UnixNetVConnection *server_vc = dynamic_cast<UnixNetVConnection *>(to_return->get_netvc());
          if (server_vc) {
            UnixNetVConnection *new_vc = server_vc->migrateToCurrentThread(sm, ethread);
            // The VC moved, free up the original one
            if (new_vc != server_vc) {
              ink_assert(new_vc == nullptr || new_vc->nh != nullptr);
              if (!new_vc) {
                // Close out to_return, we weren't able to get a connection
                to_return->do_io_close();
                to_return = nullptr;
                retval    = HSM_NOT_FOUND;
              } else {
                // Keep things from timing out on us
                new_vc->set_inactivity_timeout(new_vc->get_inactivity_timeout());
                to_return->set_netvc(new_vc);
              }
            } else {
              // Keep things from timing out on us
              server_vc->set_inactivity_timeout(server_vc->get_inactivity_timeout());
            }
          }
        }
      }
    } else { // Didn't get the lock.  to_return is still NULL
      retval = HSM_RETRY;
    }
  }

  if (to_return) {
    Debug("http_ss", "[%" PRId64 "] [acquire session] return session from shared pool", to_return->con_id);
    to_return->state = HSS_ACTIVE;
    // the attach_server_session will issue the do_io_read under the sm lock
    sm->attach_server_session(to_return);
    retval = HSM_DONE;
  }
  return retval;
}

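// Hand @a to_release back to the pool it belongs to (per-thread or global). If the pool lock
// cannot be taken the session is not released and the caller is told to retry.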
HSMresult_t
HttpSessionManager::release_session(Http1ServerSession *to_release)
{
  EThread *ethread = this_ethread();
  ServerSessionPool *pool =
    TS_SERVER_SESSION_SHARING_POOL_THREAD == to_release->sharing_pool ? ethread->server_session_pool : m_g_pool;
  bool released_p = true;

  // The per thread lock looks like it should not be needed but if it's not locked the close checking I/O op will crash.
  MUTEX_TRY_LOCK(lock, pool->mutex, ethread);
  if (lock.is_locked()) {
    pool->releaseSession(to_release);
  } else {
    Debug("http_ss", "[%" PRId64 "] [release session] could not release session due to lock contention", to_release->con_id);
    released_p = false;
  }

  return released_p ? HSM_DONE : HSM_RETRY;
}