HttpSM.cc

00001 /** @file
00002 
00003   HTTP state machine
00004 
00005   @section license License
00006 
00007   Licensed to the Apache Software Foundation (ASF) under one
00008   or more contributor license agreements.  See the NOTICE file
00009   distributed with this work for additional information
00010   regarding copyright ownership.  The ASF licenses this file
00011   to you under the Apache License, Version 2.0 (the
00012   "License"); you may not use this file except in compliance
00013   with the License.  You may obtain a copy of the License at
00014 
00015       http://www.apache.org/licenses/LICENSE-2.0
00016 
00017   Unless required by applicable law or agreed to in writing, software
00018   distributed under the License is distributed on an "AS IS" BASIS,
00019   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00020   See the License for the specific language governing permissions and
00021   limitations under the License.
00022 
00023  */
00024 
00025 #include "HttpSM.h"
00026 #include "ProxyConfig.h"
00027 #include "HttpClientSession.h"
00028 #include "HttpServerSession.h"
00029 #include "HttpDebugNames.h"
00030 #include "HttpSessionManager.h"
00031 #include "P_Cache.h"
00032 #include "P_Net.h"
00033 #include "StatPages.h"
00034 #include "Log.h"
00035 #include "LogAccessHttp.h"
00036 #include "ICP.h"
00037 #include "PluginVC.h"
00038 #include "ReverseProxy.h"
00039 #include "RemapProcessor.h"
00040 #include "Transform.h"
00041 
00042 #include "HttpPages.h"
00043 
00044 //#include "I_Auth.h"
00045 //#include "HttpAuthParams.h"
00046 #include "congest/Congestion.h"
00047 
00048 #define DEFAULT_RESPONSE_BUFFER_SIZE_INDEX    6 // 8K
00049 #define DEFAULT_REQUEST_BUFFER_SIZE_INDEX    6  // 8K
00050 #define MIN_CONFIG_BUFFER_SIZE_INDEX          5 // 4K
00051 
00052 #define hsm_release_assert(EX) \
00053 { \
00054       if (!(EX)) \
00055       { \
00056          this->dump_state_on_assert(); \
00057          _ink_assert(#EX, __FILE__, __LINE__); \
00058       } \
00059 }
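// Note: hsm_release_assert behaves like ink_release_assert, but it first calls
// dump_state_on_assert() so that this transaction's recorded history (see the
// REMEMBER macro further down) can be dumped before the process aborts.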
00060 
00061 /*
00062  * Comment this out if you don't
00063  * want HttpSM to use the new_empty_MIOBuffer(..) call
00064  */
00065 
00066 #define USE_NEW_EMPTY_MIOBUFFER
00067 
00068 // We have a debugging list that can be used to find stuck
00069 //  state machines
00070 DLL<HttpSM> debug_sm_list;
00071 ink_mutex debug_sm_list_mutex;
00072 
00073 //  _instantiate_func is called from the fast allocator to initialize
00074 //  newly-allocated HttpSM objects.  By default, the fast allocators
00075 //  just memcpy the entire prototype object, but this function does
00076 //  sparse initialization, not copying dead space for history.
00077 //
00078 //  Most of the content of the prototype object consists of zeroes.
00079 //  To take advantage of that, a "scatter list" is constructed of
00080 //  the non-zero words, and those values are scattered onto the
00081 //  new object after first zeroing out the object (except for dead space).
00082 //
00083 //  make_scatter_list should be called only once (during static
00084 //  initialization, since it isn't thread safe).
00085 
00086 #define MAX_SCATTER_LEN  (sizeof(HttpSM)/sizeof(uint32_t) + 1)
00087 static uint32_t val[MAX_SCATTER_LEN];
00088 static uint16_t to[MAX_SCATTER_LEN];
00089 static int scat_count = 0;
00090 
00091 static const int sub_header_size = sizeof("Content-type: ") - 1 + 2 + sizeof("Content-range: bytes ") - 1 + 4;
00092 static const int boundary_size = 2 + sizeof("RANGE_SEPARATOR") - 1 + 2;
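// Note on the arithmetic above: sizeof("literal") counts the trailing NUL, so each
// "sizeof(...) - 1" term is the literal's string length; the extra small constants
// appear to budget for the "--" markers and CRLF separators emitted around each
// multipart range boundary and sub-header.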
00093 
00094 static const char *str_100_continue_response = "HTTP/1.1 100 Continue\r\n\r\n";
00095 static const int len_100_continue_response = strlen(str_100_continue_response);
00096 
00097 /**
00098  * Takes two milestones and returns the difference.
00099  * @param start The start time
00100  * @param end The end time
00101  * @return A double that is the time in seconds
00102  */
00103 static double
00104 milestone_difference(const ink_hrtime start, const ink_hrtime end)
00105 {
00106   if (end == 0) {
00107     return -1;
00108   }
00109   return (double) (end - start) / 1000000000;
00110 }
00111 
00112 static double
00113 milestone_difference_msec(const ink_hrtime start, const ink_hrtime end)
00114 {
00115   if (end == 0) {
00116     return -1;
00117   }
00118   return (double) (end - start) / 1000000;
00119 }
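// Usage sketch: milestone_difference(milestones.ua_begin, milestones.ua_read_header_done)
// yields the seconds spent reading the client request header, and the _msec variant the
// same interval in milliseconds.  Both return -1 when the end milestone was never
// recorded (end == 0), distinguishing "never reached" from a zero-length interval.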
00120 
00121 void
00122 HttpSM::_make_scatter_list(HttpSM * prototype)
00123 {
00124   int j;
00125   int total_len = sizeof(HttpSM);
00126 
00127   uint32_t *p = (uint32_t *) prototype;
00128   int n = total_len / sizeof(uint32_t);
00129   scat_count = 0;
00130   for (j = 0; j < n; j++) {
00131     if (p[j]) {
00132       to[scat_count] = j;
00133       val[scat_count] = p[j];
00134       scat_count++;
00135     }
00136   }
00137 }
00138 
00139 void
00140 HttpSM::_instantiate_func(HttpSM * prototype, HttpSM * new_instance)
00141 {
00142   int history_len = sizeof(prototype->history);
00143   int total_len = sizeof(HttpSM);
00144   int pre_history_len = (char *) (&(prototype->history)) - (char *) prototype;
00145   int post_history_len = total_len - history_len - pre_history_len;
00146   int post_offset = pre_history_len + history_len;
00147   int j;
00148 
00149   memset(((char *) new_instance), 0, pre_history_len);
00150   memset(((char *) new_instance) + post_offset, 0, post_history_len);
00151 
00152   uint32_t *pd = (uint32_t *) new_instance;
00153 
00154   for (j = 0; j < scat_count; j++) {
00155     pd[to[j]] = val[j];
00156   }
00157 
00158   ink_assert((memcmp((char *) new_instance, (char *) prototype, pre_history_len) == 0) &&
00159              (memcmp(((char *) new_instance) + post_offset, ((char *) prototype) + post_offset, post_history_len) == 0));
00160 }
00161 
00162 SparseClassAllocator<HttpSM> httpSMAllocator("httpSMAllocator", 128, 16, HttpSM::_instantiate_func);
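// Sketch of the allocation fast path this enables (assuming SparseClassAllocator keeps
// one fully-constructed HttpSM as its prototype and calls _instantiate_func for every
// subsequent allocation instead of copying the whole prototype):
//
//   HttpSM *sm = httpSMAllocator.alloc();  // two memsets skip the history dead space,
//                                          // then only the scat_count non-zero words
//                                          // from the prototype are written back
//   sm->init();                            // per-transaction setup (sm_id, config, ...)
//
// Because sizeof(HttpSM) is large and mostly zero, this touches far less memory than a
// full memcpy of the prototype on every allocation.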
00163 
00164 #define HTTP_INCREMENT_TRANS_STAT(X) HttpTransact::update_stat(&t_state, X, 1);
00165 
00166 HttpVCTable::HttpVCTable()
00167 {
00168   memset(&vc_table, 0, sizeof(vc_table));
00169 }
00170 
00171 HttpVCTableEntry *
00172 HttpVCTable::new_entry()
00173 {
00174   for (int i = 0; i < vc_table_max_entries; i++) {
00175     if (vc_table[i].vc == NULL) {
00176       return vc_table + i;
00177     }
00178   }
00179 
00180   ink_release_assert(0);
00181   return NULL;
00182 }
00183 
00184 HttpVCTableEntry *
00185 HttpVCTable::find_entry(VConnection * vc)
00186 {
00187   for (int i = 0; i < vc_table_max_entries; i++) {
00188     if (vc_table[i].vc == vc) {
00189       return vc_table + i;
00190     }
00191   }
00192 
00193   return NULL;
00194 }
00195 
00196 HttpVCTableEntry *
00197 HttpVCTable::find_entry(VIO * vio)
00198 {
00199   for (int i = 0; i < vc_table_max_entries; i++) {
00200     if (vc_table[i].read_vio == vio || vc_table[i].write_vio == vio) {
00201       ink_assert(vc_table[i].vc != NULL);
00202       return vc_table + i;
00203     }
00204   }
00205 
00206   return NULL;
00207 }
00208 
00209 // void HttpVCTable::remove_entry(HttpVCTableEntry* e)
00210 //
00211 //    Deallocates all buffers from the associated
00212 //      entry and re-initializes its other fields
00213 //      for reuse
00214 //
00215 void
00216 HttpVCTable::remove_entry(HttpVCTableEntry * e)
00217 {
00218   ink_assert(e->vc == NULL || e->in_tunnel);
00219   e->vc = NULL;
00220   e->eos = false;
00221   if (e->read_buffer) {
00222     free_MIOBuffer(e->read_buffer);
00223     e->read_buffer = NULL;
00224   }
00225   if (e->write_buffer) {
00226     free_MIOBuffer(e->write_buffer);
00227     e->write_buffer = NULL;
00228   }
00229   e->read_vio = NULL;
00230   e->write_vio = NULL;
00231   e->vc_handler = NULL;
00232   e->vc_type = HTTP_UNKNOWN;
00233   e->in_tunnel = false;
00234 }
00235 
00236 // void HttpVCTable::cleanup_entry(HttpVCTableEntry* e)
00237 //
00238 //    Closes the associated vc for the entry,
00239 //     and then calls remove_entry
00240 //
00241 void
00242 HttpVCTable::cleanup_entry(HttpVCTableEntry * e)
00243 {
00244   ink_assert(e->vc);
00245   if (e->in_tunnel == false) {
00246     // Update stats
00247     switch (e->vc_type) {
00248     case HTTP_UA_VC:
00249 //              HTTP_DECREMENT_DYN_STAT(http_current_client_transactions_stat);
00250       break;
00251     default:
00252       // This covers:
00253       // HTTP_UNKNOWN, HTTP_SERVER_VC, HTTP_TRANSFORM_VC, HTTP_CACHE_READ_VC,
00254       // HTTP_CACHE_WRITE_VC, HTTP_RAW_SERVER_VC
00255       break;
00256     }
00257 
00258     e->vc->do_io_close();
00259     e->vc = NULL;
00260   }
00261   remove_entry(e);
00262 }
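// Lifecycle note: remove_entry() only frees the entry's MIOBuffers and resets its fields
// for reuse; cleanup_entry() additionally closes the underlying VC with do_io_close(),
// unless the VC has been handed off to the tunnel (in_tunnel), in which case the tunnel
// is responsible for closing it.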
00263 
00264 void
00265 HttpVCTable::cleanup_all()
00266 {
00267   for (int i = 0; i < vc_table_max_entries; i++) {
00268     if (vc_table[i].vc != NULL) {
00269       cleanup_entry(vc_table + i);
00270     }
00271   }
00272 }
00273 
00274 #define REMEMBER_EVENT_FILTER(e) 1
00275 
00276 #define __REMEMBER(x)  #x
00277 #define _REMEMBER(x)   __REMEMBER(x)
00278 
00279 #define RECORD_FILE_LINE() \
00280 history[pos].fileline = __FILE__ ":" _REMEMBER (__LINE__);
00281 
00282 #define REMEMBER(e,r) \
00283 { if (REMEMBER_EVENT_FILTER(e)) { \
00284     add_history_entry(__FILE__ ":" _REMEMBER (__LINE__), e, r); }}
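// REMEMBER appends a (file:line, event, reentrancy count) record to this state machine's
// in-object history array (the "dead space" that the scatter-list initialization above
// deliberately skips); that history is what dump_state_on_assert() is expected to report
// when an hsm_release_assert fires.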
00285 
00286 #define DebugSM(tag, ...) DebugSpecific(debug_on, tag, __VA_ARGS__)
00287 
00288 #ifdef STATE_ENTER
00289 #undef STATE_ENTER
00290 #endif
00291 #define STATE_ENTER(state_name, event) { \
00292     /*ink_assert (magic == HTTP_SM_MAGIC_ALIVE); */ REMEMBER (event, reentrancy_count);  \
00293         DebugSM("http", "[%" PRId64 "] [%s, %s]", sm_id, \
00294         #state_name, HttpDebugNames::get_event_name(event)); }
00295 
00296 #define HTTP_SM_SET_DEFAULT_HANDLER(_h) \
00297 { \
00298   REMEMBER(-1,reentrancy_count); \
00299   default_handler = _h; }
00300 
00301 
00302 static int next_sm_id = 0;
00303 
00304 
00305 HttpSM::HttpSM()
00306   : Continuation(NULL), sm_id(-1), magic(HTTP_SM_MAGIC_DEAD),
00307     //YTS Team, yamsat Plugin
00308     enable_redirection(false), redirect_url(NULL), redirect_url_len(0), redirection_tries(0),
00309     transfered_bytes(0), post_failed(false), debug_on(false),
00310     plugin_tunnel_type(HTTP_NO_PLUGIN_TUNNEL),
00311     plugin_tunnel(NULL), reentrancy_count(0),
00312     history_pos(0), tunnel(), ua_entry(NULL),
00313     ua_session(NULL), background_fill(BACKGROUND_FILL_NONE),
00314     ua_raw_buffer_reader(NULL),
00315     server_entry(NULL), server_session(NULL), shared_session_retries(0),
00316     server_buffer_reader(NULL),
00317     transform_info(), post_transform_info(), has_active_plugin_agents(false),
00318     second_cache_sm(NULL),
00319     default_handler(NULL), pending_action(NULL), historical_action(NULL),
00320     last_action(HttpTransact::SM_ACTION_UNDEFINED),
00321     // TODO:  Now that bodies can be empty, should the body counters be set to -1 ? TS-2213
00322     client_request_hdr_bytes(0), client_request_body_bytes(0),
00323     server_request_hdr_bytes(0), server_request_body_bytes(0),
00324     server_response_hdr_bytes(0), server_response_body_bytes(0),
00325     client_response_hdr_bytes(0), client_response_body_bytes(0),
00326     cache_response_hdr_bytes(0), cache_response_body_bytes(0),
00327     pushed_response_hdr_bytes(0), pushed_response_body_bytes(0),
00328     plugin_tag(0), plugin_id(0),
00329     hooks_set(false), cur_hook_id(TS_HTTP_LAST_HOOK), cur_hook(NULL),
00330     cur_hooks(0), callout_state(HTTP_API_NO_CALLOUT), terminate_sm(false), kill_this_async_done(false)
00331 {
00332   static int scatter_init = 0;
00333 
00334   memset(&history, 0, sizeof(history));
00335   memset(&vc_table, 0, sizeof(vc_table));
00336   memset(&http_parser, 0, sizeof(http_parser));
00337 
00338   if (!scatter_init) {
00339     _make_scatter_list(this);
00340     scatter_init = 1;
00341   }
00342 }
00343 
00344 void
00345 HttpSM::cleanup()
00346 {
00347   t_state.destroy();
00348   api_hooks.clear();
00349   http_parser_clear(&http_parser);
00350 
00351   // t_state.content_control.cleanup();
00352 
00353   HttpConfig::release(t_state.http_config_param);
00354 
00355   mutex.clear();
00356   tunnel.mutex.clear();
00357   cache_sm.mutex.clear();
00358   transform_cache_sm.mutex.clear();
00359   if (second_cache_sm) {
00360     second_cache_sm->mutex.clear();
00361     delete second_cache_sm;
00362   }
00363   magic = HTTP_SM_MAGIC_DEAD;
00364   debug_on = false;
00365 }
00366 
00367 void
00368 HttpSM::destroy()
00369 {
00370   cleanup();
00371   httpSMAllocator.free(this);
00372 }
00373 
00374 void
00375 HttpSM::init()
00376 {
00377   milestones.sm_start = ink_get_hrtime();
00378 
00379   magic = HTTP_SM_MAGIC_ALIVE;
00380   sm_id = 0;
00381 
00382   // Unique state machine identifier.
00383   //  changed next_sm_id from int64_t to int because
00384   //  atomic(32) is faster than atomic64.  The id is just
00385   //  for debugging, so it's OK if it wraps every few days,
00386   //  as long as the http_info bucket hash still works.
00387   //  (To test this, initialize next_sm_id to 0x7ffffff0)
00388   //  Leaving sm_id as int64_t to minimize code changes.
00389 
00390   sm_id = (int64_t) ink_atomic_increment((&next_sm_id), 1);
00391   t_state.state_machine_id = sm_id;
00392   t_state.state_machine = this;
00393 
00394   t_state.http_config_param = HttpConfig::acquire();
00395 
00396   // Simply point to the global config for the time being, no need to copy this
00397   // entire struct if nothing is going to change it.
00398   t_state.txn_conf = &t_state.http_config_param->oride;
00399 
00400   // update the cache info config structure so that
00401   // selection from alternates happens correctly.
00402   t_state.cache_info.config.cache_global_user_agent_header = t_state.txn_conf->global_user_agent_header ? true : false;
00403   t_state.cache_info.config.ignore_accept_mismatch = t_state.http_config_param->ignore_accept_mismatch;
00404   t_state.cache_info.config.ignore_accept_language_mismatch = t_state.http_config_param->ignore_accept_language_mismatch ;
00405   t_state.cache_info.config.ignore_accept_encoding_mismatch = t_state.http_config_param->ignore_accept_encoding_mismatch;
00406   t_state.cache_info.config.ignore_accept_charset_mismatch = t_state.http_config_param->ignore_accept_charset_mismatch;
00407   t_state.cache_info.config.cache_enable_default_vary_headers = t_state.http_config_param->cache_enable_default_vary_headers ? true : false;
00408 
00409   t_state.cache_info.config.cache_vary_default_text = t_state.http_config_param->cache_vary_default_text;
00410   t_state.cache_info.config.cache_vary_default_images = t_state.http_config_param->cache_vary_default_images;
00411   t_state.cache_info.config.cache_vary_default_other = t_state.http_config_param->cache_vary_default_other;
00412 
00413   t_state.init();
00414   t_state.srv_lookup = hostdb_srv_enabled;
00415 
00416   // Added to skip DNS if the document is in cache. DNS will be forced if there is an IP based ACL in
00417   // cache control or parent.config, if doc_in_cache_skip_dns is disabled, or if http caching is disabled
00418   // TODO: This probably doesn't honor this as a per-transaction overridable config.
00419   t_state.force_dns = (ip_rule_in_CacheControlTable() || t_state.parent_params->ParentTable->ipMatch ||
00420                        !(t_state.txn_conf->doc_in_cache_skip_dns) || !(t_state.txn_conf->cache_http));
00421 
00422   http_parser_init(&http_parser);
00423 
00424   SET_HANDLER(&HttpSM::main_handler);
00425 
00426 #ifdef USE_HTTP_DEBUG_LISTS
00427   ink_mutex_acquire(&debug_sm_list_mutex);
00428   debug_sm_list.push(this, this->debug_link);
00429   ink_mutex_release(&debug_sm_list_mutex);
00430 #endif
00431 
00432 }
00433 
00434 void
00435 HttpSM::set_ua_half_close_flag()
00436 {
00437   ua_session->set_half_close_flag();
00438 }
00439 
00440 inline void
00441 HttpSM::do_api_callout()
00442 {
00443   if (hooks_set) {
00444     do_api_callout_internal();
00445   } else {
00446     handle_api_return();
00447   }
00448 }
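// If no hooks are registered at any level (global, session, or transaction), hooks_set
// stays false and the callout machinery is bypassed entirely: handle_api_return() runs
// synchronously for the current api_next_action.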
00449 
00450 int
00451 HttpSM::state_add_to_list(int event, void * /* data ATS_UNUSED */)
00452 {
00453   // The list is for stat pages and general debugging
00454   //   The config variable exists mostly to allow us to
00455   //   measure any performance drop during benchmark runs
00456   if (t_state.http_config_param->enable_http_info) {
00457     STATE_ENTER(&HttpSM::state_add_to_list, event);
00458     ink_assert(event == EVENT_NONE || event == EVENT_INTERVAL);
00459 
00460     int bucket = ((unsigned int) sm_id % HTTP_LIST_BUCKETS);
00461 
00462     MUTEX_TRY_LOCK(lock, HttpSMList[bucket].mutex, mutex->thread_holding);
00463     // the client_vc's timeout events can be triggered, so we should not
00464     // reschedule the http_sm when the lock is not acquired.
00465     // FIXME: the sm_list may miss some http_sms when there is lock contention
00466     if (lock)
00467       HttpSMList[bucket].sm_list.push(this);
00468   }
00469 
00470   t_state.api_next_action = HttpTransact::SM_ACTION_API_SM_START;
00471   do_api_callout();
00472   return EVENT_DONE;
00473 }
00474 
00475 int
00476 HttpSM::state_remove_from_list(int event, void * /* data ATS_UNUSED */)
00477 {
00478   // The config parameters are guaranteed not to change
00479   //   across the life of a transaction so it is safe to
00480   //   check the config here and use it to determine
00481   //   whether we need to remove ourselves from the
00482   //   state page list
00483   if (t_state.http_config_param->enable_http_info) {
00484     STATE_ENTER(&HttpSM::state_remove_from_list, event);
00485     ink_assert(event == EVENT_NONE || event == EVENT_INTERVAL);
00486 
00487     int bucket = ((unsigned int) sm_id % HTTP_LIST_BUCKETS);
00488 
00489     MUTEX_TRY_LOCK(lock, HttpSMList[bucket].mutex, mutex->thread_holding);
00490     if (!lock) {
00491       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_remove_from_list);
00492       mutex->thread_holding->schedule_in(this, HTTP_LIST_RETRY);
00493       return EVENT_DONE;
00494     }
00495 
00496     HttpSMList[bucket].sm_list.remove(this);
00497   }
00498 
00499   return this->kill_this_async_hook(EVENT_NONE, NULL);
00500 }
00501 
00502 int
00503 HttpSM::kill_this_async_hook(int /* event ATS_UNUSED */, void * /* data ATS_UNUSED */)
00504 {
00505   // In the base HttpSM, we don't have anything to
00506   //   do here.  Subclasses can override this function
00507   //   to do their own asynchronous cleanup.
00508   // So we're now ready to finish off the state machine
00509   terminate_sm = true;
00510   kill_this_async_done = true;
00511 
00512   return EVENT_DONE;
00513 }
00514 
00515 
00516 void
00517 HttpSM::start_sub_sm()
00518 {
00519   tunnel.init(this, mutex);
00520   cache_sm.init(this, mutex);
00521   transform_cache_sm.init(this, mutex);
00522 }
00523 
00524 void
00525 HttpSM::attach_client_session(HttpClientSession * client_vc, IOBufferReader * buffer_reader)
00526 {
00527   milestones.ua_begin = ink_get_hrtime();
00528   ink_assert(client_vc != NULL);
00529 
00530   ua_session = client_vc;
00531   mutex = client_vc->mutex;
00532   if (ua_session->debug()) debug_on = true;
00533 
00534   start_sub_sm();
00535 
00536   // Allocate a user agent entry in the state machine's
00537   //   vc table
00538   ua_entry = vc_table.new_entry();
00539   ua_entry->vc = client_vc;
00540   ua_entry->vc_type = HTTP_UA_VC;
00541 
00542   NetVConnection* netvc = client_vc->get_netvc();
00543 
00544   ats_ip_copy(&t_state.client_info.addr, netvc->get_remote_addr());
00545   t_state.client_info.port = netvc->get_local_port();
00546   t_state.client_info.is_transparent = netvc->get_is_transparent();
00547   t_state.backdoor_request = !client_vc->hooks_enabled();
00548   t_state.client_info.port_attribute = static_cast<HttpProxyPort::TransportType>(netvc->attributes);
00549 
00550   HTTP_INCREMENT_DYN_STAT(http_current_client_transactions_stat);
00551 
00552   // Record api hook set state
00553   hooks_set = client_vc->has_hooks();
00554 
00555   // Setup for parsing the header
00556   ua_buffer_reader = buffer_reader;
00557   ua_entry->vc_handler = &HttpSM::state_read_client_request_header;
00558   t_state.hdr_info.client_request.destroy();
00559   t_state.hdr_info.client_request.create(HTTP_TYPE_REQUEST);
00560   http_parser_init(&http_parser);
00561 
00562   // Prepare raw reader which will live until we are sure this is HTTP indeed
00563   if (is_transparent_passthrough_allowed()) {
00564       ua_raw_buffer_reader = buffer_reader->clone();
00565   }
00566 
00567   // We first need to run the transaction start hook.  Since
00568   //  this hook may be asynchronous, we need to disable IO on
00569   //  the client but set the continuation to be the state machine
00570   //  so that if we get timeout events the SM handles them
00571   ua_entry->read_vio = client_vc->do_io_read(this, 0, buffer_reader->mbuf);
00572 
00573   /////////////////////////
00574   // set up timeouts     //
00575   /////////////////////////
00576   client_vc->get_netvc()->set_inactivity_timeout(HRTIME_SECONDS(HttpConfig::m_master.accept_no_activity_timeout));
00577   client_vc->get_netvc()->set_active_timeout(HRTIME_SECONDS(HttpConfig::m_master.transaction_active_timeout_in));
00578 
00579   ++reentrancy_count;
00580   // Add our state sm to the sm list
00581   state_add_to_list(EVENT_NONE, NULL);
00582   // This is another external entry point and it is possible for the state machine to get terminated
00583   // while down the call chain from @c state_add_to_list. So we need to use the reentrancy_count to
00584   // prevent cleanup there and do it here as we return to the external caller.
00585   if (terminate_sm == true && reentrancy_count == 1) {
00586     kill_this();
00587   } else {
00588     --reentrancy_count;
00589     ink_assert(reentrancy_count >= 0);
00590   }
00591 }
00592 
00593 
00594 void
00595 HttpSM::setup_client_read_request_header()
00596 {
00597   ink_assert(ua_entry->vc_handler == &HttpSM::state_read_client_request_header);
00598 
00599   ua_entry->read_vio = ua_session->do_io_read(this, INT64_MAX, ua_buffer_reader->mbuf);
00600   // The header may already be in the buffer if this
00601   //  is a request from a keep-alive connection
00602   if (ua_buffer_reader->read_avail() > 0)
00603     handleEvent(VC_EVENT_READ_READY, ua_entry->read_vio);
00604 }
00605 
00606 void
00607 HttpSM::setup_blind_tunnel_port()
00608 {
00609   // We've gotten a connect on a port for blind tunneling so
00610   //  call transact to figure out where it is going
00611   call_transact_and_set_next_state(HttpTransact::HandleBlindTunnel);
00612 }
00613 
00614 int
00615 HttpSM::state_read_client_request_header(int event, void *data)
00616 {
00617   STATE_ENTER(&HttpSM::state_read_client_request_header, event);
00618 
00619   ink_assert(ua_entry->read_vio == (VIO *) data);
00620   ink_assert(server_entry == NULL);
00621   ink_assert(server_session == NULL);
00622 
00623   int bytes_used = 0;
00624   ink_assert(ua_entry->eos == false);
00625 
00626   switch (event) {
00627   case VC_EVENT_READ_READY:
00628   case VC_EVENT_READ_COMPLETE:
00629     // More data to parse
00630     break;
00631 
00632   case VC_EVENT_EOS:
00633     ua_entry->eos = true;
00634     if (client_request_hdr_bytes !=0)
00635       break;
00636     // Fall through
00637   case VC_EVENT_ERROR:
00638   case VC_EVENT_INACTIVITY_TIMEOUT:
00639   case VC_EVENT_ACTIVE_TIMEOUT:
00640     // The user agent is hosed.  Close it &
00641     //   bail on the state machine
00642     vc_table.cleanup_entry(ua_entry);
00643     ua_entry = NULL;
00644     t_state.client_info.abort = HttpTransact::ABORTED;
00645     terminate_sm = true;
00646     return 0;
00647   }
00648 
00649   // Reset the inactivity timeout if this is the first
00650   //   time we've been called.  The timeout had been set to
00651   //   the accept timeout by the HttpClientSession
00652   //
00653   if (client_request_hdr_bytes == 0) {
00654     ua_session->get_netvc()->set_inactivity_timeout(HRTIME_SECONDS(t_state.txn_conf->transaction_no_activity_timeout_in));
00655   }
00656   /////////////////////
00657   // tokenize header //
00658   /////////////////////
00659 
00660   MIMEParseResult state = t_state.hdr_info.client_request.parse_req(&http_parser,
00661                                                         ua_buffer_reader,
00662                                                         &bytes_used,
00663                                                         ua_entry->eos);
00664 
00665   client_request_hdr_bytes += bytes_used;
00666 
00667   // Check to see if we are over the hdr size limit
00668   if (client_request_hdr_bytes > t_state.txn_conf->request_hdr_max_size) {
00669     DebugSM("http", "client header bytes were over max header size; treating as a bad request");
00670     state = PARSE_ERROR;
00671   }
00672 
00673   if (event == VC_EVENT_READ_READY &&
00674       state == PARSE_ERROR &&
00675       is_transparent_passthrough_allowed() &&
00676       ua_raw_buffer_reader != NULL) {
00677 
00678       DebugSM("http", "[%" PRId64 "] first request on connection failed parsing, switching to passthrough.", sm_id);
00679 
00680       t_state.transparent_passthrough = true;
00681       http_parser_clear(&http_parser);
00682 
00683       // Turn off read eventing until we get the
00684       // blind tunnel infrastructure set up
00685       ua_session->get_netvc()->do_io_read(this, 0, NULL);
00686 
00687       /* establish blind tunnel */
00688       setup_blind_tunnel_port();
00689       return 0;
00690   }
00691 
00692   // Check to see if we are done parsing the header
00693   if (state != PARSE_CONT || ua_entry->eos ||
00694         (state == PARSE_CONT && event == VC_EVENT_READ_COMPLETE)) {
00695     if (ua_raw_buffer_reader != NULL) {
00696         ua_raw_buffer_reader->dealloc();
00697         ua_raw_buffer_reader = NULL;
00698     }
00699     http_parser_clear(&http_parser);
00700     ua_entry->vc_handler = &HttpSM::state_watch_for_client_abort;
00701     milestones.ua_read_header_done = ink_get_hrtime();
00702   }
00703 
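  // At this point "state" is one of three values:
  //   PARSE_ERROR - header malformed or over request_hdr_max_size; client I/O is shut
  //                 off and HttpTransact::BadRequest generates the error response
  //   PARSE_CONT  - header incomplete; either give up (EOS, or READ_COMPLETE with no
  //                 more data coming) or re-enable the read VIO and wait for more bytes
  //   PARSE_DONE  - full request header parsed; hand off to HttpTransact::ModifyRequest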
00704   switch (state) {
00705   case PARSE_ERROR:
00706     DebugSM("http", "[%" PRId64 "] error parsing client request header", sm_id);
00707 
00708     // Disable further I/O on the client
00709     ua_entry->read_vio->nbytes = ua_entry->read_vio->ndone;
00710 
00711     call_transact_and_set_next_state(HttpTransact::BadRequest);
00712     break;
00713 
00714   case PARSE_CONT:
00715     if (ua_entry->eos) {
00716       DebugSM("http_seq", "[%" PRId64 "] EOS before client request parsing finished", sm_id);
00717       set_ua_abort(HttpTransact::ABORTED, event);
00718 
00719       // Disable further I/O on the client
00720       ua_entry->read_vio->nbytes = ua_entry->read_vio->ndone;
00721 
00722       call_transact_and_set_next_state(HttpTransact::BadRequest);
00723       break;
00724     } else if (event == VC_EVENT_READ_COMPLETE) {
00725       DebugSM("http_parse", "[%" PRId64 "] VC_EVENT_READ_COMPLETE and PARSE CONT state", sm_id);
00726       break;
00727     } else {
00728       if (is_transparent_passthrough_allowed() &&
00729           ua_raw_buffer_reader != NULL &&
00730           ua_raw_buffer_reader->get_current_block()->write_avail() <= 0) {
00731         //Disable passthrough regardless of eventual parsing failure or success -- otherwise
00732         //we either have to consume some data or risk blocking the writer.
00733         ua_raw_buffer_reader->dealloc();
00734         ua_raw_buffer_reader = NULL;
00735       }
00736       ua_entry->read_vio->reenable();
00737       return VC_EVENT_CONT;
00738     }
00739   case PARSE_DONE:
00740     DebugSM("http", "[%" PRId64 "] done parsing client request header", sm_id);
00741 
00742     if (ua_session->m_active == false) {
00743       ua_session->m_active = true;
00744       HTTP_INCREMENT_DYN_STAT(http_current_active_client_connections_stat);
00745     }
00746     if (t_state.hdr_info.client_request.method_get_wksidx() == HTTP_WKSIDX_TRACE ||
00747          (t_state.hdr_info.request_content_length == 0 &&
00748           t_state.client_info.transfer_encoding != HttpTransact::CHUNKED_ENCODING)) {
00749 
00750       // Enable further IO to watch for client aborts
00751       ua_entry->read_vio->reenable();
00752     } else {
00753       // Disable further I/O on the client since there could
00754       //  be a body that we are tunneling (POST/PUT/CONNECT or
00755       //  extension methods) and we can't issue
00756       //  another IO later for the body with a different buffer
00757       ua_entry->read_vio->nbytes = ua_entry->read_vio->ndone;
00758     }
00759     //YTS Team, yamsat Plugin
00760     //Setting enable_redirection according to HttpConfig master
00761     if ((HttpConfig::m_master.number_of_redirections > 0) ||
00762         (t_state.method == HTTP_WKSIDX_POST && HttpConfig::m_master.post_copy_size))
00763       enable_redirection = HttpConfig::m_master.redirection_enabled;
00764 
00765     call_transact_and_set_next_state(HttpTransact::ModifyRequest);
00766 
00767     break;
00768   default:
00769     ink_assert(!"not reached");
00770   }
00771 
00772   return 0;
00773 }
00774 
00775 #ifdef PROXY_DRAIN
00776 int
00777 HttpSM::state_drain_client_request_body(int event, void *data)
00778 {
00779   STATE_ENTER(&HttpSM::state_drain_client_request_body, event);
00780 
00781   ink_assert(ua_entry->read_vio == (VIO *) data);
00782   ink_assert(ua_entry->vc == ua_session);
00783 
00784   switch (event) {
00785   case VC_EVENT_EOS:
00786   case VC_EVENT_ERROR:
00787   case VC_EVENT_ACTIVE_TIMEOUT:
00788   case VC_EVENT_INACTIVITY_TIMEOUT:
00789     {
00790       // Nothing we can do
00791       terminate_sm = true;
00792       break;
00793     }
00794   case VC_EVENT_READ_READY:
00795     {
00796       int64_t avail = ua_buffer_reader->read_avail();
00797       int64_t left = t_state.hdr_info.request_content_length - client_request_body_bytes;
00798 
00799       // Since we are only reading what's needed to complete
00800       //   the post, there must be something left to do
00801       ink_assert(avail < left);
00802 
00803       client_request_body_bytes += avail;
00804       ua_buffer_reader->consume(avail);
00805       ua_entry->read_vio->reenable_re();
00806       break;
00807     }
00808   case VC_EVENT_READ_COMPLETE:
00809     {
00810       // We've finished draining the POST body
00811       int64_t avail = ua_buffer_reader->read_avail();
00812 
00813       ua_buffer_reader->consume(avail);
00814       client_request_body_bytes += avail;
00815       ink_assert(client_request_body_bytes == t_state.hdr_info.request_content_length);
00816 
00817       ua_buffer_reader->mbuf->size_index = HTTP_HEADER_BUFFER_SIZE_INDEX;
00818       ua_entry->vc_handler = &HttpSM::state_watch_for_client_abort;
00819       ua_entry->read_vio = ua_entry->vc->do_io_read(this, INT64_MAX, ua_buffer_reader->mbuf);
00820       call_transact_and_set_next_state(NULL);
00821       break;
00822     }
00823   default:
00824     ink_release_assert(0);
00825   }
00826 
00827   return EVENT_DONE;
00828 }
00829 #endif /* PROXY_DRAIN */
00830 
00831 
00832 int
00833 HttpSM::state_watch_for_client_abort(int event, void *data)
00834 {
00835   STATE_ENTER(&HttpSM::state_watch_for_client_abort, event);
00836 
00837   ink_assert(ua_entry->read_vio == (VIO *) data);
00838   ink_assert(ua_entry->vc == ua_session);
00839 
00840   switch (event) {
00841   case VC_EVENT_EOS:
00842   case VC_EVENT_ERROR:
00843   case VC_EVENT_ACTIVE_TIMEOUT:
00844   case VC_EVENT_INACTIVITY_TIMEOUT:
00845     {
00846       if (tunnel.is_tunnel_active()) {
00847         // Check to see if the user agent is part of the tunnel.
00848         //  If so forward the event to the tunnel.  Otherwise,
00849         //  kill the tunnel and fall through to the case
00850         //  where the tunnel is not active
00851         HttpTunnelConsumer *c = tunnel.get_consumer(ua_session);
00852         if (c && c->alive) {
00853           DebugSM("http", "[%" PRId64 "] [watch_for_client_abort] "
00854                 "forwarding event %s to tunnel", sm_id, HttpDebugNames::get_event_name(event));
00855           tunnel.handleEvent(event, c->write_vio);
00856           return 0;
00857         } else {
00858           tunnel.kill_tunnel();
00859         }
00860       }
00861       // Disable further I/O on the client
00862       if (ua_entry->read_vio) {
00863         ua_entry->read_vio->nbytes = ua_entry->read_vio->ndone;
00864       }
00865       mark_server_down_on_client_abort();
00866       milestones.ua_close = ink_get_hrtime();
00867       set_ua_abort(HttpTransact::ABORTED, event);
00868       terminate_sm = true;
00869       break;
00870     }
00871   case VC_EVENT_READ_COMPLETE:
00872     // XXX Work around for TS-1233.
00873   case VC_EVENT_READ_READY:
00874     //  Ignore.  Could be a pipelined request.  We'll get to  it
00875     //    when we finish the current transaction
00876     break;
00877   default:
00878     ink_release_assert(0);
00879     break;
00880   }
00881 
00882   return 0;
00883 }
00884 
00885 void
00886 HttpSM::setup_push_read_response_header()
00887 {
00888   ink_assert(server_session == NULL);
00889   ink_assert(server_entry == NULL);
00890   ink_assert(ua_session != NULL);
00891   ink_assert(t_state.method == HTTP_WKSIDX_PUSH);
00892 
00893   // Set the handler to read the pushed response hdr
00894   ua_entry->vc_handler = &HttpSM::state_read_push_response_header;
00895 
00896   // We record both the total payload size as
00897   //  client_request_body_bytes and the bytes for the individual
00898   //  pushed hdr and body components
00899   pushed_response_hdr_bytes = 0;
00900   client_request_body_bytes = 0;
00901 
00902   // Note: we must use destroy() here since clear()
00903   //  does not free the memory from the header
00904   t_state.hdr_info.server_response.destroy();
00905   t_state.hdr_info.server_response.create(HTTP_TYPE_RESPONSE);
00906   http_parser_clear(&http_parser);
00907 
00908   // We already did the READ when we read the client
00909   //  request header
00910   ink_assert(ua_entry->read_vio);
00911 
00912   // If there is anything in the buffer call the parsing routines
00913   //  since if the response is finished, we won't get any
00914   //  additional callbacks
00915   int resp_hdr_state = VC_EVENT_CONT;
00916   if (ua_buffer_reader->read_avail() > 0) {
00917     if (ua_entry->eos) {
00918       resp_hdr_state = state_read_push_response_header(VC_EVENT_EOS, ua_entry->read_vio);
00919     } else {
00920       resp_hdr_state = state_read_push_response_header(VC_EVENT_READ_READY, ua_entry->read_vio);
00921     }
00922   }
00923   // It is possible that the entire PUSHed response header was already
00924   //  in the buffer.  In this case we don't want to fire off any more
00925   //  IO since we are going to switch buffers when we go to tunnel to
00926   //  the cache
00927   if (resp_hdr_state == VC_EVENT_CONT) {
00928     ink_assert(ua_entry->eos == false);
00929     ua_entry->read_vio = ua_session->do_io_read(this, INT64_MAX, ua_buffer_reader->mbuf);
00930   }
00931 }
00932 
00933 int
00934 HttpSM::state_read_push_response_header(int event, void *data)
00935 {
00936   STATE_ENTER(&HttpSM::state_read_push_response_header, event);
00937   ink_assert(ua_entry->read_vio == (VIO *) data);
00938   ink_assert(t_state.current.server == NULL);
00939 
00940   int64_t data_size = 0;
00941   int64_t  bytes_used = 0;
00942 
00943   // Not used here.
00944   //bool parse_error = false;
00945   //VIO* vio = (VIO*) data;
00946 
00947   switch (event) {
00948   case VC_EVENT_EOS:
00949     ua_entry->eos = true;
00950     // Fall through
00951 
00952   case VC_EVENT_READ_READY:
00953   case VC_EVENT_READ_COMPLETE:
00954     // More data to parse
00955     break;
00956 
00957   case VC_EVENT_ERROR:
00958   case VC_EVENT_INACTIVITY_TIMEOUT:
00959   case VC_EVENT_ACTIVE_TIMEOUT:
00960     // The user agent is hosed.  Send an error
00961     t_state.client_info.abort = HttpTransact::ABORTED;
00962     call_transact_and_set_next_state(HttpTransact::HandleBadPushRespHdr);
00963     return 0;
00964   }
00965 
00966   int state = PARSE_CONT;
00967   while (ua_buffer_reader->read_avail() && state == PARSE_CONT) {
00968     const char *start = ua_buffer_reader->start();
00969     const char *tmp = start;
00970     data_size = ua_buffer_reader->block_read_avail();
00971     ink_assert(data_size >= 0);
00972 
00973     /////////////////////
00974     // tokenize header //
00975     /////////////////////
00976     state = t_state.hdr_info.server_response.parse_resp(&http_parser, &tmp, tmp + data_size, false      // Only call w/ eof when data exhausted
00977       );
00978 
00979     bytes_used = tmp - start;
00980 
00981     ink_release_assert(bytes_used <= data_size);
00982     ua_buffer_reader->consume(bytes_used);
00983     pushed_response_hdr_bytes += bytes_used;
00984     client_request_body_bytes += bytes_used;
00985   }
00986 
00987   // We are out of data.  If we've received an EOS we need to
00988   //  call the parser with (eof == true) so it can determine
00989   //  whether to use the response as is or declare a parse error
00990   if (ua_entry->eos) {
00991     const char *end = ua_buffer_reader->start();
00992     state = t_state.hdr_info.server_response.parse_resp(&http_parser, &end, end, true   // We are out of data after server eos
00993       );
00994     ink_release_assert(state == PARSE_DONE || state == PARSE_ERROR);
00995   }
00996   // Don't allow 0.9 (unparsable headers) since TS doesn't
00997   //   cache 0.9 responses
00998   if (state == PARSE_DONE && t_state.hdr_info.server_response.version_get() == HTTPVersion(0, 9)) {
00999     state = PARSE_ERROR;
01000   }
01001 
01002   if (state != PARSE_CONT) {
01003     // Disable further IO
01004     ua_entry->read_vio->nbytes = ua_entry->read_vio->ndone;
01005     http_parser_clear(&http_parser);
01006     milestones.server_read_header_done = ink_get_hrtime();
01007   }
01008 
01009   switch (state) {
01010   case PARSE_ERROR:
01011     DebugSM("http", "[%" PRId64 "] error parsing push response header", sm_id);
01012     call_transact_and_set_next_state(HttpTransact::HandleBadPushRespHdr);
01013     break;
01014 
01015   case PARSE_CONT:
01016     ua_entry->read_vio->reenable();
01017     return VC_EVENT_CONT;
01018 
01019   case PARSE_DONE:
01020     DebugSM("http", "[%" PRId64 "] done parsing push response header", sm_id);
01021     call_transact_and_set_next_state(HttpTransact::HandlePushResponseHdr);
01022     break;
01023   default:
01024     ink_assert(!"not reached");
01025   }
01026 
01027   return VC_EVENT_DONE;
01028 }
01029 
01030 //////////////////////////////////////////////////////////////////////////////
01031 //
01032 //  HttpSM::state_http_server_open()
01033 //
01034 //////////////////////////////////////////////////////////////////////////////
01035 int
01036 HttpSM::state_raw_http_server_open(int event, void *data)
01037 {
01038   STATE_ENTER(&HttpSM::state_raw_http_server_open, event);
01039   ink_assert(server_entry == NULL);
01040   milestones.server_connect_end = ink_get_hrtime();
01041   NetVConnection *netvc = NULL;
01042 
01043   pending_action = NULL;
01044   switch (event) {
01045   case NET_EVENT_OPEN:
01046 
01047     if (t_state.pCongestionEntry != NULL) {
01048       t_state.pCongestionEntry->connection_opened();
01049       t_state.congestion_connection_opened = 1;
01050     }
01051     // Record the VC in our table
01052     server_entry = vc_table.new_entry();
01053     server_entry->vc = netvc = (NetVConnection *) data;
01054     server_entry->vc_type = HTTP_RAW_SERVER_VC;
01055     t_state.current.state = HttpTransact::CONNECTION_ALIVE;
01056 
01057     netvc->set_inactivity_timeout(HRTIME_SECONDS(t_state.txn_conf->transaction_no_activity_timeout_out));
01058     netvc->set_active_timeout(HRTIME_SECONDS(t_state.txn_conf->transaction_active_timeout_out));
01059     break;
01060 
01061   case VC_EVENT_ERROR:
01062   case NET_EVENT_OPEN_FAILED:
01063     if (t_state.pCongestionEntry != NULL) {
01064       t_state.current.state = HttpTransact::CONNECTION_ERROR;
01065       call_transact_and_set_next_state(HttpTransact::HandleResponse);
01066       return 0;
01067     } else {
01068       t_state.current.state = HttpTransact::OPEN_RAW_ERROR;
01069       // use this value just to get around other values
01070       t_state.hdr_info.response_error = HttpTransact::STATUS_CODE_SERVER_ERROR;
01071     }
01072     break;
01073   case CONGESTION_EVENT_CONGESTED_ON_F:
01074     t_state.current.state = HttpTransact::CONGEST_CONTROL_CONGESTED_ON_F;
01075     break;
01076   case CONGESTION_EVENT_CONGESTED_ON_M:
01077     t_state.current.state = HttpTransact::CONGEST_CONTROL_CONGESTED_ON_M;
01078     break;
01079 
01080   case EVENT_INTERVAL:
01081     Error("[HttpSM::state_raw_http_server_open] event: EVENT_INTERVAL state: %d server_entry: %p",
01082           t_state.current.state, server_entry);
01083     return 0;
01084 
01085   default:
01086     ink_release_assert(0);
01087     break;
01088   }
01089 
01090   call_transact_and_set_next_state(HttpTransact::OriginServerRawOpen);
01091   return 0;
01092 
01093 }
01094 
01095 
01096 // int HttpSM::state_request_wait_for_transform_read(int event, void* data)
01097 //
01098 //   We've done a successful transform open and issued a do_io_write
01099 //    to the transform.  We are now ready for the transform to tell
01100 //    us it is ready to be read from and is done modifying the
01101 //    server request header
01102 //
01103 int
01104 HttpSM::state_request_wait_for_transform_read(int event, void *data)
01105 {
01106   STATE_ENTER(&HttpSM::state_request_wait_for_transform_read, event);
01107   int64_t size = *((int64_t*)data);
01108 
01109   switch (event) {
01110   case TRANSFORM_READ_READY:
01111     if (size != INT64_MAX && size >= 0) {
01112       // We got a content length so update our internal
01113       //   data as well as fix up the request header
01114       t_state.hdr_info.transform_request_cl = size;
01115       t_state.hdr_info.server_request.value_set_int64(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH, size);
01116       setup_server_send_request_api();
01117       break;
01118     } else {
01119       // No content length from the post.  This is a no go
01120       //  since http spec requires content length when
01121       //  sending a request message body.  Change the event
01122       //  to an error and fall through
01123       event = VC_EVENT_ERROR;
01124       Log::error("Request transformation failed to set content length");
01125     }
01126     // FALLTHROUGH
01127   default:
01128     state_common_wait_for_transform_read(&post_transform_info, &HttpSM::tunnel_handler_post, event, data);
01129     break;
01130   }
01131 
01132   return 0;
01133 }
01134 
01135 
01136 // int HttpSM::state_response_wait_for_transform_read(int event, void* data)
01137 //
01138 //   We've done a successful transform open and issued a do_io_write
01139 //    to the transform.  We are now ready for the transform to tell
01140 //    us it is ready to be read from and is done modifying the
01141 //    user agent response header
01142 //
01143 int
01144 HttpSM::state_response_wait_for_transform_read(int event, void *data)
01145 {
01146   STATE_ENTER(&HttpSM::state_response_wait_for_transform_read, event);
01147   int64_t size = *((int64_t*)data);
01148 
01149   switch (event) {
01150   case TRANSFORM_READ_READY:
01151     if (size != INT64_MAX && size >= 0) {
01152       // We got a content length so update our internal state
01153       t_state.hdr_info.transform_response_cl = size;
01154       t_state.hdr_info.transform_response.value_set_int64(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH, size);
01155     } else {
01156       t_state.hdr_info.transform_response_cl = HTTP_UNDEFINED_CL;
01157     }
01158     call_transact_and_set_next_state(HttpTransact::handle_transform_ready);
01159     break;
01160   default:
01161     state_common_wait_for_transform_read(&transform_info, &HttpSM::tunnel_handler, event, data);
01162     break;
01163   }
01164 
01165   return 0;
01166 }
01167 
01168 
01169 // int HttpSM::state_common_wait_for_transform_read(...)
01170 //
01171 //   This function handles the overlapping cases between request and response
01172 //     transforms which prevents code duplication
01173 //
01174 int
01175 HttpSM::state_common_wait_for_transform_read(HttpTransformInfo * t_info, HttpSMHandler tunnel_handler, int event, void *data)
01176 {
01177   STATE_ENTER(&HttpSM::state_common_wait_for_transform_read, event);
01178   HttpTunnelConsumer *c = 0;
01179 
01180   switch (event) {
01181   case HTTP_TUNNEL_EVENT_DONE:
01182     // There are three reasons why the tunnel could signal completion:
01183     //   1) there was an error from the transform write
01184     //   2) there was an error from the data source
01185     //   3) the transform write completed before it sent
01186     //      TRANSFORM_READ_READY which is legal and in which
01187     //      case we should just wait for the transform read ready
01188     c = tunnel.get_consumer(t_info->vc);
01189     ink_assert(c != NULL);
01190     ink_assert(c->vc == t_info->entry->vc);
01191 
01192     if (c->handler_state == HTTP_SM_TRANSFORM_FAIL) {
01193       // Case 1: we failed to complete the write to the
01194       //  transform; fall through to the vc event error case
01195       ink_assert(c->write_success == false);
01196     } else if (c->producer->read_success == false) {
01197       // Case 2 - error from data source
01198       if (c->producer->vc_type == HT_HTTP_CLIENT) {
01199         // Our source is the client.  POST can't
01200         //   be truncated so forward to the tunnel
01201         //   handler to clean this mess up
01202         ink_assert(t_info == &post_transform_info);
01203         return (this->*tunnel_handler) (event, data);
01204       } else {
01205         // On the response side, we just forward as much
01206         //   as we can of truncated documents so
01207         //   just don't cache the result
01208         ink_assert(t_info == &transform_info);
01209         t_state.api_info.cache_transformed = false;
01210         return 0;
01211       }
01212     } else {
01213       // Case 3 - wait for transform read ready
01214       return 0;
01215     }
01216     // FALLTHROUGH
01217   case VC_EVENT_ERROR:
01218     // Transform VC sends NULL on error conditions
01219     if (!c) {
01220       c = tunnel.get_consumer(t_info->vc);
01221       ink_assert(c != NULL);
01222     }
01223     vc_table.cleanup_entry(t_info->entry);
01224     t_info->entry = NULL;
01225     // In Case 1: error due to transform write,
01226     // we need to keep the original t_info->vc for transform_cleanup()
01227     // to skip do_io_close(); otherwise, set it to NULL.
01228     if (c->handler_state != HTTP_SM_TRANSFORM_FAIL) {
01229       t_info->vc = NULL;
01230     }
01231     tunnel.kill_tunnel();
01232     call_transact_and_set_next_state(HttpTransact::HandleApiErrorJump);
01233     break;
01234   default:
01235     ink_release_assert(0);
01236   }
01237 
01238   return 0;
01239 }
01240 
01241 // int HttpSM::state_api_callback(int event, void *data)
01242 
01243 //   InkAPI.cc calls us directly here to avoid problems
01244 //    with setting and changing the default_handler
01245 //    function.  As such, this is an entry point
01246 //    and needs to handle the reentrancy counter and
01247 //    deallocation of the state machine if necessary
01248 //
01249 int
01250 HttpSM::state_api_callback(int event, void *data)
01251 {
01252   ink_release_assert(magic == HTTP_SM_MAGIC_ALIVE);
01253 
01254   ink_assert(reentrancy_count >= 0);
01255   reentrancy_count++;
01256 
01257   STATE_ENTER(&HttpSM::state_api_callback, event);
01258 
01259   state_api_callout(event, data);
01260 
01261   // The sub-handler signals when it is time for the state
01262   //  machine to exit.  We can only exit if we are not reentrantly
01263 //  called, otherwise when our call unwinds, we will be
01264   //  running on a dead state machine
01265   //
01266   // Because of the need for an api shutdown hook, kill_this()
01267   //  is also reentrant.  As such, we don't want to decrement
01268   //  the reentrancy count until after we run kill_this()
01269   //
01270   if (terminate_sm == true && reentrancy_count == 1) {
01271     kill_this();
01272   } else {
01273     reentrancy_count--;
01274     ink_assert(reentrancy_count >= 0);
01275   }
01276 
01277   return VC_EVENT_CONT;
01278 }
01279 
01280 int
01281 HttpSM::state_api_callout(int event, void *data)
01282 {
01283   // enum and variable for figuring out what the next action is
01284   //   after we've finished the api state
01285   enum AfterApiReturn_t
01286   {
01287     API_RETURN_UNKNOWN = 0,
01288     API_RETURN_CONTINUE,
01289     API_RETURN_DEFERED_CLOSE,
01290     API_RETURN_DEFERED_SERVER_ERROR,
01291     API_RETURN_ERROR_JUMP,
01292     API_RETURN_SHUTDOWN,
01293     API_RETURN_INVALIDATE_ERROR
01294   };
01295   AfterApiReturn_t api_next = API_RETURN_UNKNOWN;
01296 
01297   if (event != EVENT_NONE) {
01298     STATE_ENTER(&HttpSM::state_api_callout, event);
01299   }
01300 
01301   switch (event) {
01302   case EVENT_INTERVAL:
01303     ink_assert(pending_action == data);
01304     pending_action = NULL;
01305     // FALLTHROUGH
01306   case EVENT_NONE:
01307   case HTTP_API_CONTINUE:
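    // The hooks are walked in three passes tracked by cur_hooks: pass 0 consults the
    // global hooks (http_global_hooks), pass 1 the session hooks on ua_session, and
    // pass 2 this transaction's own api_hooks.  Each pass is exhausted before the next
    // one begins.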
01308     if ((cur_hook_id >= 0) && (cur_hook_id < TS_HTTP_LAST_HOOK)) {
01309       if (!cur_hook) {
01310         if (cur_hooks == 0) {
01311           cur_hook = http_global_hooks->get(cur_hook_id);
01312           cur_hooks++;
01313         }
01314       }
01315       // even if ua_session is NULL, cur_hooks must
01316       // be incremented otherwise cur_hooks is not set to 2 and
01317       // transaction hooks (stored in api_hooks object) are not called.
01318       if (!cur_hook) {
01319         if (cur_hooks == 1) {
01320           if (ua_session) {
01321             cur_hook = ua_session->ssn_hook_get(cur_hook_id);
01322           }
01323           cur_hooks++;
01324         }
01325       }
01326       if (!cur_hook) {
01327         if (cur_hooks == 2) {
01328           cur_hook = api_hooks.get(cur_hook_id);
01329           cur_hooks++;
01330         }
01331       }
01332       if (cur_hook) {
01333         if (callout_state == HTTP_API_NO_CALLOUT) {
01334           callout_state = HTTP_API_IN_CALLOUT;
01335         }
01336 
01337         /* The MUTEX_TRY_LOCK macro was changed so
01338            that it can't handle NULL mutexes.  The plugins
01339            can use null mutexes so we have to do this manually.
01340            We need to take a smart pointer to the mutex since
01341            the plugin could release its mutex while we're in
01342            the callout.
01343          */
01344         bool plugin_lock;
01345         Ptr<ProxyMutex> plugin_mutex;
01346         if (cur_hook->m_cont->mutex) {
01347           plugin_mutex = cur_hook->m_cont->mutex;
01348           plugin_lock = MUTEX_TAKE_TRY_LOCK(cur_hook->m_cont->mutex, mutex->thread_holding);
01349 
01350           if (!plugin_lock) {
01351             HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_api_callout);
01352             ink_assert(pending_action == NULL);
01353             pending_action = mutex->thread_holding->schedule_in(this, HRTIME_MSECONDS(10));
01354             return 0;
01355           }
01356         } else {
01357           plugin_lock = false;
01358         }
01359 
01360         DebugSM("http", "[%" PRId64 "] calling plugin on hook %s at hook %p",
01361               sm_id, HttpDebugNames::get_api_hook_name(cur_hook_id), cur_hook);
01362 
01363         APIHook *hook = cur_hook;
01364         cur_hook = cur_hook->next();
01365 
01366         hook->invoke(TS_EVENT_HTTP_READ_REQUEST_HDR + cur_hook_id, this);
01367 
01368         if (plugin_lock) {
01369           Mutex_unlock(plugin_mutex, mutex->thread_holding);
01370         }
01371 
01372         return 0;
01373       }
01374     }
01375     // Map the callout state into api_next
01376     switch (callout_state) {
01377     case HTTP_API_NO_CALLOUT:
01378     case HTTP_API_IN_CALLOUT:
01379       if (t_state.api_modifiable_cached_resp &&
01380           t_state.api_update_cached_object == HttpTransact::UPDATE_CACHED_OBJECT_PREPARE) {
01381         t_state.api_update_cached_object = HttpTransact::UPDATE_CACHED_OBJECT_CONTINUE;
01382       }
01383       api_next = API_RETURN_CONTINUE;
01384       break;
01385     case HTTP_API_DEFERED_CLOSE:
01386       api_next = API_RETURN_DEFERED_CLOSE;
01387       break;
01388     case HTTP_API_DEFERED_SERVER_ERROR:
01389       api_next = API_RETURN_DEFERED_SERVER_ERROR;
01390       break;
01391     default:
01392       ink_release_assert(0);
01393     }
01394     break;
01395 
01396   case HTTP_API_ERROR:
01397     if (callout_state == HTTP_API_DEFERED_CLOSE) {
01398       api_next = API_RETURN_DEFERED_CLOSE;
01399     } else if (cur_hook_id == TS_HTTP_TXN_CLOSE_HOOK) {
01400       // If we are closing the state machine, we can't
01401       //   jump to an error state so just continue
01402       api_next = API_RETURN_CONTINUE;
01403     } else if (t_state.api_http_sm_shutdown) {
01404       t_state.api_http_sm_shutdown = false;
01405       t_state.cache_info.object_read = NULL;
01406       cache_sm.close_read();
01407       transform_cache_sm.close_read();
01408       release_server_session();
01409       terminate_sm = true;
01410       api_next = API_RETURN_SHUTDOWN;
01411       t_state.squid_codes.log_code = SQUID_LOG_TCP_DENIED;
01412     } else if (t_state.api_modifiable_cached_resp &&
01413                t_state.api_update_cached_object == HttpTransact::UPDATE_CACHED_OBJECT_PREPARE) {
01414       t_state.api_update_cached_object = HttpTransact::UPDATE_CACHED_OBJECT_ERROR;
01415       api_next = API_RETURN_INVALIDATE_ERROR;
01416     } else {
01417       api_next = API_RETURN_ERROR_JUMP;
01418     }
01419     break;
01420 
01421     // We may receive an event from the tunnel
01422     // if it took a long time to call the SEND_RESPONSE_HDR hook
01423   case HTTP_TUNNEL_EVENT_DONE:
01424     state_common_wait_for_transform_read(&transform_info, &HttpSM::tunnel_handler, event, data);
01425     return 0;
01426 
01427   default:
01428     ink_assert(false);
01429     terminate_sm = true;
01430     return 0;
01431   }
01432 
01433   // Now that we've completed the api state and figured out what
01434   //   to do next, do it
01435   callout_state = HTTP_API_NO_CALLOUT;
01436   switch (api_next) {
01437   case API_RETURN_CONTINUE:
01438     if (t_state.api_next_action == HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR) {
01439       do_redirect();
01440     }
01441     handle_api_return();
01442     break;
01443   case API_RETURN_DEFERED_CLOSE:
01444     ink_assert(t_state.api_next_action == HttpTransact::SM_ACTION_API_SM_SHUTDOWN);
01445     do_api_callout();
01446     break;
01447   case API_RETURN_DEFERED_SERVER_ERROR:
01448     ink_assert(t_state.api_next_action == HttpTransact::SM_ACTION_API_SEND_REQUEST_HDR);
01449     ink_assert(t_state.current.state != HttpTransact::CONNECTION_ALIVE);
01450     call_transact_and_set_next_state(HttpTransact::HandleResponse);
01451     break;
01452   case API_RETURN_ERROR_JUMP:
01453     call_transact_and_set_next_state(HttpTransact::HandleApiErrorJump);
01454     break;
01455   case API_RETURN_SHUTDOWN:
01456     break;
01457   case API_RETURN_INVALIDATE_ERROR:
01458     do_cache_prepare_update();
01459     break;
01460   default:
01461   case API_RETURN_UNKNOWN:
01462     ink_release_assert(0);
01463 
01464   }
01465 
01466   return 0;
01467 }
01468 
01469 // void HttpSM::handle_api_return()
01470 //
01471 //   Figures out what to do after the api callouts
01472 //    have finished.  This is messy and I would like
01473 //    to come up with a cleaner way to handle the api
01474 //    return.  The way we are doing things also makes a
01475 //    mess of set_next_state()
01476 //
01477 void
01478 HttpSM::handle_api_return()
01479 {
01480   switch (t_state.api_next_action) {
01481   case HttpTransact::SM_ACTION_API_SM_START:
01482     if (t_state.client_info.port_attribute == HttpProxyPort::TRANSPORT_BLIND_TUNNEL) {
01483       setup_blind_tunnel_port();
01484     } else {
01485       setup_client_read_request_header();
01486     }
01487     return;
01488   case HttpTransact::SM_ACTION_API_PRE_REMAP:
01489   case HttpTransact::SM_ACTION_API_POST_REMAP:
01490   case HttpTransact::SM_ACTION_API_READ_REQUEST_HDR:
01491   case HttpTransact::SM_ACTION_API_OS_DNS:
01492   case HttpTransact::SM_ACTION_API_READ_CACHE_HDR:
01493   case HttpTransact::SM_ACTION_API_READ_RESPONSE_HDR:
01494   case HttpTransact::SM_ACTION_API_CACHE_LOOKUP_COMPLETE:
01495     // this part is added for automatic redirect
01496     if (t_state.api_next_action == HttpTransact::SM_ACTION_API_READ_RESPONSE_HDR && t_state.api_release_server_session) {
01497       t_state.api_release_server_session = false;
01498       release_server_session();
01499     } else if (t_state.api_next_action == HttpTransact::SM_ACTION_API_CACHE_LOOKUP_COMPLETE &&
01500                t_state.api_cleanup_cache_read &&
01501                t_state.api_update_cached_object != HttpTransact::UPDATE_CACHED_OBJECT_PREPARE) {
01502       t_state.api_cleanup_cache_read = false;
01503       t_state.cache_info.object_read = NULL;
01504       t_state.request_sent_time = UNDEFINED_TIME;
01505       t_state.response_received_time = UNDEFINED_TIME;
01506       cache_sm.close_read();
01507       transform_cache_sm.close_read();
01508     }
01509     call_transact_and_set_next_state(NULL);
01510     return;
01511   case HttpTransact::SM_ACTION_API_SEND_REQUEST_HDR:
01512     setup_server_send_request();
01513     return;
01514   case HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR:
01515     // Set back the inactivity timeout
01516     if (ua_session) {
01517       ua_session->get_netvc()->set_inactivity_timeout(HRTIME_SECONDS(t_state.txn_conf->transaction_no_activity_timeout_in));
01518     }
01519     // we have further processing to do
01520     //  based on what t_state.next_action is
01521     break;
01522   case HttpTransact::SM_ACTION_API_SM_SHUTDOWN:
01523     state_remove_from_list(EVENT_NONE, NULL);
01524     return;
01525   default:
01526     ink_release_assert(!"Not reached");
01527     break;
01528   }
01529 
01530   switch (t_state.next_action) {
01531   case HttpTransact::SM_ACTION_TRANSFORM_READ:
01532     {
01533       HttpTunnelProducer *p = setup_transfer_from_transform();
01534       perform_transform_cache_write_action();
01535       tunnel.tunnel_run(p);
01536       break;
01537     }
01538   case HttpTransact::SM_ACTION_SERVER_READ:
01539     {
01540       if (unlikely(t_state.did_upgrade_succeed)) {
01541        // We've successfully handled the upgrade, let's now set up
01542        // a blind tunnel.
01543        if(t_state.is_websocket) {
01544          HTTP_INCREMENT_DYN_STAT(http_websocket_current_active_client_connections_stat);
01545        }
01546 
01547        setup_blind_tunnel(true);
01548       } else {
01549        setup_server_transfer();
01550        perform_cache_write_action();
01551        tunnel.tunnel_run();
01552       }
01553       break;
01554     }
01555   case HttpTransact::SM_ACTION_SERVE_FROM_CACHE:
01556     {
01557       setup_cache_read_transfer();
01558       tunnel.tunnel_run();
01559       break;
01560     }
01561 
01562   case HttpTransact::SM_ACTION_INTERNAL_CACHE_WRITE:
01563     {
01564       if (cache_sm.cache_write_vc) {
01565         setup_internal_transfer(&HttpSM::tunnel_handler_cache_fill);
01566       } else {
01567         setup_internal_transfer(&HttpSM::tunnel_handler);
01568       }
01569       break;
01570     }
01571 
01572   case HttpTransact::SM_ACTION_INTERNAL_CACHE_NOOP:
01573   case HttpTransact::SM_ACTION_INTERNAL_CACHE_DELETE:
01574   case HttpTransact::SM_ACTION_INTERNAL_CACHE_UPDATE_HEADERS:
01575   case HttpTransact::SM_ACTION_SEND_ERROR_CACHE_NOOP:
01576     {
01577       setup_internal_transfer(&HttpSM::tunnel_handler);
01578       break;
01579     }
01580 
01581   case HttpTransact::SM_ACTION_REDIRECT_READ:
01582     {
01583       call_transact_and_set_next_state(HttpTransact::HandleRequest);
01584       break;
01585     }
01586 
01587   default:
01588     {
01589       ink_release_assert(!"Should not get here");
01590     }
01591   }
01592 }
01593 
01594 //////////////////////////////////////////////////////////////////////////////
01595 //
01596 //  HttpSM::state_http_server_open()
01597 //
01598 //////////////////////////////////////////////////////////////////////////////
01599 int
01600 HttpSM::state_http_server_open(int event, void *data)
01601 {
01602   DebugSM("http_track", "entered inside state_http_server_open");
01603   STATE_ENTER(&HttpSM::state_http_server_open, event);
01604   // TODO decide whether to uncomment after finish testing redirect
01605   // ink_assert(server_entry == NULL);
01606   pending_action = NULL;
01607   milestones.server_connect_end = ink_get_hrtime();
01608   HttpServerSession *session;
01609 
01610   switch (event) {
01611   case NET_EVENT_OPEN:
01612     session = (TS_SERVER_SESSION_SHARING_POOL_THREAD == t_state.txn_conf->server_session_sharing_pool) ? 
01613       THREAD_ALLOC_INIT(httpServerSessionAllocator, mutex->thread_holding) :
01614       httpServerSessionAllocator.alloc();
01615     session->sharing_pool = static_cast<TSServerSessionSharingPoolType>(t_state.txn_conf->server_session_sharing_pool);
01616     session->sharing_match = static_cast<TSServerSessionSharingMatchType>(t_state.txn_conf->server_session_sharing_match);
01617 //    session->share_session = t_state.txn_conf->share_server_sessions;
01618 
01619     // If origin_max_connections or origin_min_keep_alive_connections is
01620     // set then we are metering the max and/or min number
01621     // of connections per host.  Set enable_origin_connection_limiting
01622     // to true in the server session so it will increment and decrement
01623     // the connection count.
01624     if (t_state.txn_conf->origin_max_connections > 0 ||
01625         t_state.http_config_param->origin_min_keep_alive_connections > 0) {
01626       DebugSM("http_ss", "[%" PRId64 "] max number of connections: %" PRIu64, sm_id, t_state.txn_conf->origin_max_connections);
01627       session->enable_origin_connection_limiting = true;
01628     }
01629     /*UnixNetVConnection * vc = (UnixNetVConnection*)(ua_session->client_vc);
01630        UnixNetVConnection *server_vc = (UnixNetVConnection*)data;
01631        printf("client fd is :%d , server fd is %d\n",vc->con.fd,
01632        server_vc->con.fd); */
01633     ats_ip_copy(&session->server_ip, &t_state.current.server->addr);
01634     session->new_connection((NetVConnection *) data);
01635     ats_ip_port_cast(&session->server_ip) = htons(t_state.current.server->port);
01636     session->state = HSS_ACTIVE;
01637 
01638     attach_server_session(session);
01639     if (t_state.current.request_to == HttpTransact::PARENT_PROXY) {
01640       session->to_parent_proxy = true;
01641       HTTP_INCREMENT_DYN_STAT(http_current_parent_proxy_connections_stat);
01642       HTTP_INCREMENT_DYN_STAT(http_total_parent_proxy_connections_stat);
01643 
01644     } else {
01645       session->to_parent_proxy = false;
01646     }
01647     handle_http_server_open();
01648     return 0;
01649   case EVENT_INTERVAL:
01650     do_http_server_open();
01651     break;
01652   case VC_EVENT_ERROR:
01653   case NET_EVENT_OPEN_FAILED:
01654     t_state.current.state = HttpTransact::CONNECTION_ERROR;
01655     // save the errno from the connect fail for future use (passed as negative value, flip back)
01656     t_state.current.server->set_connect_fail(event == NET_EVENT_OPEN_FAILED ? -reinterpret_cast<intptr_t>(data) : ECONNABORTED);
01657 
01658     /* If we get this error, then we simply can't bind to the 4-tuple to make the connection.  There's no hope of
01659        retries succeeding in the near future. The best option is to just shut down the connection without further
01660        comment. The only known cause for this is outbound transparency combined with use of the client target address / source
01661        port, as noted in TS-1424. If the keep-alives desync, the current connection can be attempting to rebind the 4-tuple
01662        simultaneously with the shutdown of an existing connection. Dropping the client side will cause it to pick
01663        a new source port and recover from this issue.
01664     */
01665     if (EADDRNOTAVAIL == t_state.current.server->connect_result) {
01666       if (is_debug_tag_set("http_tproxy")) {
01667         ip_port_text_buffer ip_c, ip_s;
01668         Debug("http_tproxy", "Force close of client connect (%s->%s) due to EADDRNOTAVAIL [%" PRId64 "]"
01669               , ats_ip_nptop(&t_state.client_info.addr.sa, ip_c, sizeof ip_c)
01670               , ats_ip_nptop(&t_state.server_info.addr.sa, ip_s, sizeof ip_s)
01671               , sm_id
01672           );
01673       }
01674       t_state.client_info.keep_alive = HTTP_NO_KEEPALIVE; // part of the problem, clear it.
01675       terminate_sm = true;
01676     } else {
01677       call_transact_and_set_next_state(HttpTransact::HandleResponse);
01678     }
01679     return 0;
01680   case CONGESTION_EVENT_CONGESTED_ON_F:
01681     t_state.current.state = HttpTransact::CONGEST_CONTROL_CONGESTED_ON_F;
01682     call_transact_and_set_next_state(HttpTransact::HandleResponse);
01683     return 0;
01684   case CONGESTION_EVENT_CONGESTED_ON_M:
01685     t_state.current.state = HttpTransact::CONGEST_CONTROL_CONGESTED_ON_M;
01686     call_transact_and_set_next_state(HttpTransact::HandleResponse);
01687     return 0;
01688 
01689   default:
01690     Error("[HttpSM::state_http_server_open] Unknown event: %d", event);
01691     ink_release_assert(0);
01692     return 0;
01693   }
01694 
01695   return 0;
01696 }
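// ---------------------------------------------------------------------------
// Editor's sketch (not part of HttpSM.cc): state_http_server_open() above
// recovers the connect errno from the NET_EVENT_OPEN_FAILED payload with
// -reinterpret_cast<intptr_t>(data).  A minimal stand-alone illustration of
// that convention, using hypothetical helper names:
// ---------------------------------------------------------------------------

#include <cstdint>   // intptr_t

// Net layer side: the failure is passed as a negative errno stuffed into a void*.
static void *
sketch_encode_open_failure(int err)
{
  return reinterpret_cast<void *>(static_cast<intptr_t>(-err));
}

// State machine side: flip the sign back to recover the original errno,
// e.g. EADDRNOTAVAIL in the transparency case discussed above.
static int
sketch_decode_open_failure(void *data)
{
  return static_cast<int>(-reinterpret_cast<intptr_t>(data));
}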
01697 
01698 
01699 int
01700 HttpSM::state_read_server_response_header(int event, void *data)
01701 {
01702   STATE_ENTER(&HttpSM::state_read_server_response_header, event);
01703   ink_assert(server_entry->read_vio == (VIO *) data);
01704   ink_assert(t_state.current.server->state == HttpTransact::STATE_UNDEFINED);
01705   ink_assert(t_state.current.state == HttpTransact::STATE_UNDEFINED);
01706 
01707   int bytes_used = 0;
01708   VIO *vio = (VIO *) data;
01709 
01710   switch (event) {
01711   case VC_EVENT_EOS:
01712     server_entry->eos = true;
01713 
01714     // If no bytes were transmitted, the parser treats this
01715     // as a good 0.9 response, which it technically is,
01716     // but it's indistinguishable from an overloaded
01717     // server closing the connection, so don't accept
01718     // zero-length responses
01719     if (vio->ndone == 0) {
01720       // Error handling function
01721       handle_server_setup_error(event, data);
01722       return 0;
01723     }
01724     // Fall through
01725   case VC_EVENT_READ_READY:
01726   case VC_EVENT_READ_COMPLETE:
01727     // More data to parse
01728     break;
01729 
01730   case VC_EVENT_ERROR:
01731   case VC_EVENT_INACTIVITY_TIMEOUT:
01732   case VC_EVENT_ACTIVE_TIMEOUT:
01733     // Error handling function
01734     handle_server_setup_error(event, data);
01735     return 0;
01736   }
01737 
01738   // Reset the inactivity timeout if this is the first
01739   //   time we've been called.  The timeout had been set to
01740   //   the connect timeout when we set up to read the header
01741   //
01742   if (server_response_hdr_bytes == 0) {
01743     milestones.server_first_read = ink_get_hrtime();
01744 
01745     if (t_state.api_txn_no_activity_timeout_value != -1) {
01746       server_session->get_netvc()->set_inactivity_timeout(HRTIME_MSECONDS(t_state.api_txn_no_activity_timeout_value));
01747     } else {
01748       server_session->get_netvc()->set_inactivity_timeout(HRTIME_SECONDS(t_state.txn_conf->transaction_no_activity_timeout_out));
01749     }
01750 
01751     // For requests that contain a body, we can cancel the ua inactivity timeout.
01752     if (ua_session && t_state.hdr_info.request_content_length) {
01753       ua_session->get_netvc()->cancel_inactivity_timeout();
01754     }
01755   }
01756   /////////////////////
01757   // tokenize header //
01758   /////////////////////
01759   int state = t_state.hdr_info.server_response.parse_resp(&http_parser, server_buffer_reader,
01760                                                           &bytes_used, server_entry->eos);
01761 
01762   server_response_hdr_bytes += bytes_used;
01763 
01764   // Don't allow 0.9 (unparsable headers) on keep-alive connections after
01765   //  the connection has already served a transaction, as what we are likely
01766   //  looking at is garbage on a keep-alive channel corrupted by the origin
01767   //  server
01768   if (state == PARSE_DONE &&
01769       t_state.hdr_info.server_response.version_get() == HTTPVersion(0, 9) && server_session->transact_count > 1) {
01770     state = PARSE_ERROR;
01771   }
01772   // Check to see if we are over the hdr size limit
01773   if (server_response_hdr_bytes > t_state.txn_conf->response_hdr_max_size) {
01774     state = PARSE_ERROR;
01775   }
01776 
01777   if (state != PARSE_CONT) {
01778     // Disable further IO
01779     server_entry->read_vio->nbytes = server_entry->read_vio->ndone;
01780     http_parser_clear(&http_parser);
01781     milestones.server_read_header_done = ink_get_hrtime();
01782   }
01783 
01784   switch (state) {
01785   case PARSE_ERROR:
01786     {
01787       // Many broken servers send really badly formed 302 redirects.
01788       //  Even if the parser doesn't like the redirect, forward it
01789       //  if it's got a Location header.  We check the type of the
01790       //  response to make sure that the parser was able to parse
01791       //  something and didn't just throw up its hands (INKqa05339)
01792       bool allow_error = false;
01793       if (t_state.hdr_info.server_response.type_get() == HTTP_TYPE_RESPONSE &&
01794           t_state.hdr_info.server_response.status_get() == HTTP_STATUS_MOVED_TEMPORARILY) {
01795         if (t_state.hdr_info.server_response.field_find(MIME_FIELD_LOCATION, MIME_LEN_LOCATION)) {
01796           allow_error = true;
01797         }
01798       }
01799 
01800       if (allow_error == false) {
01801         DebugSM("http_seq", "Error parsing server response header");
01802         t_state.current.state = HttpTransact::PARSE_ERROR;
01803 
01804         // If the server closed prematurely on us, use the
01805         //   server setup error routine since it will forward
01806         //   error to a POST tunnel if any
01807         if (event == VC_EVENT_EOS) {
01808           handle_server_setup_error(VC_EVENT_EOS, data);
01809         } else {
01810           call_transact_and_set_next_state(HttpTransact::HandleResponse);
01811         }
01812         break;
01813       }
01814       // FALLTHROUGH (since we are allowing the parse error)
01815     }
01816   case PARSE_DONE:
01817     DebugSM("http_seq", "Done parsing server response header");
01818 
01819     // Now that we know that we have all of the origin server
01820     // response headers, we can reset the client inactivity
01821     // timeout.  This is unlikely to cause a recurrence of
01822     // an old bug because there will be no more retries now that
01823     // the connection has been established.  It is possible
01824     // however.  We do not need to reset the inactivity timeout
01825     // if the request contains a body (noted by the
01826     // request_content_length field) because it was never
01827     // cancelled.
01828     //
01829 
01830     // we now reset the client inactivity timeout only
01831     // when we are ready to send the response headers. In the
01832     // case of transform plugin, this is after the transform
01833     // outputs the 1st byte, which can take a long time if the
01834     // plugin buffers the whole response.
01835     // Also, if the request contains a body, we cancel the timeout
01836     // when we read the 1st byte of the origin server response.
01837     /*
01838        if (ua_session && !t_state.hdr_info.request_content_length) {
01839        ua_session->get_netvc()->set_inactivity_timeout(HRTIME_SECONDS(
01840        HttpConfig::m_master.accept_no_activity_timeout));
01841        }
01842      */
01843 
01844     t_state.current.state = HttpTransact::CONNECTION_ALIVE;
01845     t_state.transact_return_point = HttpTransact::HandleResponse;
01846     t_state.api_next_action = HttpTransact::SM_ACTION_API_READ_RESPONSE_HDR;
01847 
01848     // if we exceeded the limit, deallocate postdata buffers and disable redirection
01849     if (enable_redirection && (redirection_tries < HttpConfig::m_master.number_of_redirections)) {
01850       ++redirection_tries;
01851     } else {
01852       tunnel.deallocate_redirect_postdata_buffers();
01853       enable_redirection = false;
01854     }
01855 
01856     do_api_callout();
01857     break;
01858   case PARSE_CONT:
01859     ink_assert(server_entry->eos == false);
01860     server_entry->read_vio->reenable();
01861     return VC_EVENT_CONT;
01862 
01863   default:
01864     ink_assert(!"not reached");
01865   }
01866 
01867   return 0;
01868 }
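// ---------------------------------------------------------------------------
// Editor's sketch (not part of HttpSM.cc): the two guards applied above once
// parse_resp() finishes -- reject an apparent HTTP/0.9 response on a reused
// keep-alive session, and reject headers larger than the configured limit
// (txn_conf->response_hdr_max_size).  Hypothetical helper, shown only to make
// the decision explicit:
// ---------------------------------------------------------------------------
static bool
sketch_response_header_acceptable(bool parsed_as_0_9, int transact_count, int64_t hdr_bytes, int64_t hdr_max)
{
  if (parsed_as_0_9 && transact_count > 1) {
    return false;   // likely garbage on a keep-alive channel corrupted by the origin
  }
  if (hdr_bytes > hdr_max) {
    return false;   // response header exceeds the configured maximum size
  }
  return true;
}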
01869 
01870 int
01871 HttpSM::state_send_server_request_header(int event, void *data)
01872 {
01873   STATE_ENTER(&HttpSM::state_send_server_request_header, event);
01874   ink_assert(server_entry != NULL);
01875   ink_assert(server_entry->write_vio == (VIO *) data || server_entry->read_vio == (VIO *) data);
01876 
01877   int method;
01878 
01879   switch (event) {
01880   case VC_EVENT_WRITE_READY:
01881     server_entry->write_vio->reenable();
01882     break;
01883 
01884   case VC_EVENT_WRITE_COMPLETE:
01885     // We are done sending the request header, deallocate
01886     //  our buffer and then decide what to do next
01887     free_MIOBuffer(server_entry->write_buffer);
01888     server_entry->write_buffer = NULL;
01889     method = t_state.hdr_info.server_request.method_get_wksidx();
01890     if (!t_state.api_server_request_body_set &&
01891          method != HTTP_WKSIDX_TRACE &&
01892          (t_state.hdr_info.request_content_length > 0 || t_state.client_info.transfer_encoding == HttpTransact::CHUNKED_ENCODING)) {
01893 
01894       if (post_transform_info.vc) {
01895         setup_transform_to_server_transfer();
01896       } else {
01897         if (t_state.http_config_param->send_100_continue_response) {
01898           int len = 0;
01899           const char *expect = t_state.hdr_info.client_request.value_get(MIME_FIELD_EXPECT, MIME_LEN_EXPECT, &len);
01900           // When it receives an "Expect: 100-continue" request from the client, ATS sends a "100 Continue" response to the client
01901           // immediately, before receiving the real response from the origin server.
01902           if ((len == HTTP_LEN_100_CONTINUE) && (strncasecmp(expect, HTTP_VALUE_100_CONTINUE, HTTP_LEN_100_CONTINUE) == 0)) {
01903             int64_t alloc_index = buffer_size_to_index(len_100_continue_response);
01904             ua_entry->write_buffer = new_MIOBuffer(alloc_index);
01905             IOBufferReader *buf_start = ua_entry->write_buffer->alloc_reader();
01906 
01907             DebugSM("http_seq", "send 100 Continue response to client");
01908             int64_t nbytes = ua_entry->write_buffer->write(str_100_continue_response, len_100_continue_response);
01909             ua_session->do_io_write(ua_session->get_netvc(), nbytes, buf_start);
01910           }
01911         }
01912         do_setup_post_tunnel(HTTP_SERVER_VC);
01913       }
01914     } else {
01915       // It's time to start reading the response
01916       setup_server_read_response_header();
01917     }
01918 
01919     break;
01920 
01921   case VC_EVENT_READ_READY:
01922     // We already did the read for the response header and
01923     //  we got some data.  Wait for the request header
01924     //  send before dealing with it.  However, we need to
01925     //  disable further IO here since the whole response
01926     //  may be in the buffer and we can not switch buffers
01927     //  on the io core later
01928     ink_assert(server_entry->read_vio == (VIO *) data);
01929     // setting nbytes to ndone would disable reads and remove it from the read queue.
01930     // We can't do this in the epoll paradigm because we may be missing epoll errors that would
01931     // prevent us from leaving this state.
01932     // setup_server_read_response_header will trigger READ_READY to itself if there is data in the buffer.
01933 
01934     //server_entry->read_vio->nbytes = server_entry->read_vio->ndone;
01935 
01936     break;
01937 
01938   case VC_EVENT_EOS:
01939     // EOS of stream comes from the read side.  Treat it as
01940     //  an error if there is nothing in the read buffer.  If
01941     //  there is something the server may have blasted back
01942     //  the response before receiving the request.  Happens
01943     //  often with redirects
01944     //
01945     //  If we are in the middle of an api callout, it
01946     //    means we haven't actually sent the request yet
01947     //    so the stuff in the buffer is garbage and we
01948     //    want to ignore it
01949     //
01950     server_entry->eos = true;
01951 
01952     // I'm not sure about the above comment, but if EOS is received on read and we are
01953     // still in this state, we must have not gotten WRITE_COMPLETE.  With epoll we might not receive EOS
01954     // from both read and write sides of a connection so it should be handled correctly (close tunnels,
01955     // deallocate, etc) here with handle_server_setup_error().  Otherwise we might hang due to not shutting
01956     // down and never receiving another event again.
01957     /*if (server_buffer_reader->read_avail() > 0 && callout_state == HTTP_API_NO_CALLOUT) {
01958        break;
01959        } */
01960 
01961     // Nothing in the buffer
01962     //  FALLTHROUGH to error
01963   case VC_EVENT_ERROR:
01964   case VC_EVENT_ACTIVE_TIMEOUT:
01965   case VC_EVENT_INACTIVITY_TIMEOUT:
01966     handle_server_setup_error(event, data);
01967     break;
01968 
01969   default:
01970     ink_release_assert(0);
01971     break;
01972   }
01973 
01974   return 0;
01975 }
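// ---------------------------------------------------------------------------
// Editor's sketch (not part of HttpSM.cc): the VC_EVENT_WRITE_COMPLETE branch
// above replies "100 Continue" to the client before the origin answers.
// Presumably str_100_continue_response / len_100_continue_response hold the
// standard interim response; a minimal stand-in (hypothetical names) would be:
// ---------------------------------------------------------------------------
static const char sketch_100_continue_response[] = "HTTP/1.1 100 Continue\r\n\r\n";
static const int  sketch_100_continue_len        = sizeof(sketch_100_continue_response) - 1;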
01976 
01977 void
01978 HttpSM::process_srv_info(HostDBInfo * r)
01979 {
01980   DebugSM("dns_srv", "beginning process_srv_info");
01981 
01982   /* we didn't get any SRV records, continue with a normal lookup */
01983   if (!r || !r->is_srv || !r->round_robin) {
01984     t_state.dns_info.srv_hostname[0] = '\0';
01985     t_state.dns_info.srv_lookup_success = false;
01986     t_state.srv_lookup = false;
01987     DebugSM("dns_srv", "No SRV records were available, continuing to lookup %s", t_state.dns_info.lookup_name);
01988   } else {
01989     HostDBRoundRobin *rr = r->rr();
01990     HostDBInfo *srv = NULL;
01991     if (rr) {
01992       srv = rr->select_best_srv(t_state.dns_info.srv_hostname, &mutex.m_ptr->thread_holding->generator,
01993           ink_cluster_time(), (int) t_state.txn_conf->down_server_timeout);
01994     }
01995     if (!srv) {
01996       t_state.dns_info.srv_lookup_success = false;
01997       t_state.dns_info.srv_hostname[0] = '\0';
01998       t_state.srv_lookup = false;
01999       DebugSM("dns_srv", "SRV records empty for %s", t_state.dns_info.lookup_name);
02000     } else {
02001       ink_assert(r->md5_high == srv->md5_high && r->md5_low == srv->md5_low &&
02002           r->md5_low_low == srv->md5_low_low);
02003       t_state.dns_info.srv_lookup_success = true;
02004       t_state.dns_info.srv_port = srv->data.srv.srv_port;
02005       t_state.dns_info.srv_app = srv->app;
02006       //t_state.dns_info.single_srv = (rr->good == 1);
02007       ink_assert(srv->data.srv.key == makeHostHash(t_state.dns_info.srv_hostname));
02008       DebugSM("dns_srv", "select SRV records %s", t_state.dns_info.srv_hostname);
02009     }
02010   }
02011   return;
02012 }
02013 
02014 void
02015 HttpSM::process_hostdb_info(HostDBInfo * r)
02016 {
02017   if (r && !r->failed()) {
02018     ink_time_t now = ink_cluster_time();
02019     HostDBInfo *ret = NULL;
02020     t_state.dns_info.lookup_success = true;
02021     t_state.dns_info.lookup_validated = true;
02022     if (r->round_robin) {
02023       // if use_client_target_addr is set, make sure the client
02024       // addr sits in the pool
02025       if (t_state.http_config_param->use_client_target_addr == 1
02026         && t_state.client_info.is_transparent
02027         && t_state.dns_info.os_addr_style == HttpTransact::DNSLookupInfo::OS_ADDR_TRY_DEFAULT) {
02028       
02029         HostDBRoundRobin *rr = r->rr();
02030         sockaddr const* addr = t_state.state_machine->ua_session->get_netvc()->get_local_addr();
02031         if (rr && rr->find_ip(addr) == NULL) {
02032           // The client specified server address does not appear 
02033           // in the DNS pool
02034           DebugSM("http", "use_client_target_addr == 1. Client specified address is not in the pool. Client address is not validated.");
02035           t_state.dns_info.lookup_validated = false;
02036         }
02037         // Even if we did find the client specified address in the pool,
02038         // we want to make sure that address is used and not some
02039         // other address in the DNS set.
02040         // Copy over the client information and give up on the lookup
02041         ats_ip_copy(t_state.host_db_info.ip(), addr);
02042         t_state.dns_info.os_addr_style = HttpTransact::DNSLookupInfo::OS_ADDR_TRY_CLIENT;
02043       } else {
02044         // Since the time elapsed between current time and client_request_time
02045         // may be very large, we cannot use client_request_time to approximate
02046         // current time when calling select_best_http().
02047         HostDBRoundRobin *rr = r->rr();
02048         ret = rr->select_best_http(&t_state.client_info.addr.sa, now, (int) t_state.txn_conf->down_server_timeout);
02049 
02050         // set the srv target's last_failure
02051         if (t_state.dns_info.srv_lookup_success) {
02052           uint32_t last_failure = 0xFFFFFFFF;
02053           for (int i = 0; i < rr->rrcount && last_failure != 0; ++i) {
02054             if (last_failure > rr->info[i].app.http_data.last_failure)
02055               last_failure = rr->info[i].app.http_data.last_failure;
02056           }
02057 
02058           if (last_failure != 0 && (uint32_t) (now - t_state.txn_conf->down_server_timeout) < last_failure) {
02059             HostDBApplicationInfo app;
02060             app.allotment.application1 = 0;
02061             app.allotment.application2 = 0;
02062             app.http_data.last_failure = last_failure;
02063             hostDBProcessor.setby_srv(t_state.dns_info.lookup_name, 0, t_state.dns_info.srv_hostname, &app);
02064           }
02065         }
02066       }
02067     } else {
02068       if (t_state.http_config_param->use_client_target_addr == 1
02069         && t_state.client_info.is_transparent 
02070         && t_state.dns_info.os_addr_style == HttpTransact::DNSLookupInfo::OS_ADDR_TRY_DEFAULT) {
02071         // Compare the client specified address against the looked up address
02072         sockaddr const* addr = t_state.state_machine->ua_session->get_netvc()->get_local_addr();
02073         if (!ats_ip_addr_eq(addr, &r->data.ip.sa)) {
02074           DebugSM("http", "use_client_target_addr == 1.  Comparing single addresses failed. Client address is not validated.");
02075           t_state.dns_info.lookup_validated = false;
02076         } 
02077         // Regardless of whether the client address matches the DNS
02078         // record or not, we want to use that address.  Therefore,
02079         // we copy over the client address info and skip the assignment
02080         // from the DNS cache
02081         ats_ip_copy(t_state.host_db_info.ip(), addr);
02082         t_state.dns_info.os_addr_style = HttpTransact::DNSLookupInfo::OS_ADDR_TRY_CLIENT;
02083 
02084         // Leave ret unassigned, so we don't overwrite the host_db_info
02085       }
02086       else {
02087         ret = r;
02088       }
02089     }
02090     if (ret) {
02091       t_state.host_db_info = *ret;
02092       ink_release_assert(!t_state.host_db_info.reverse_dns);
02093       ink_release_assert(ats_is_ip(t_state.host_db_info.ip()));
02094     }
02095   } else {
02096     DebugSM("http", "[%" PRId64 "] DNS lookup failed for '%s'", sm_id, t_state.dns_info.lookup_name);
02097 
02098     t_state.dns_info.lookup_success = false;
02099     t_state.host_db_info.app.allotment.application1 = 0;
02100     t_state.host_db_info.app.allotment.application2 = 0;
02101     ink_assert(!t_state.host_db_info.round_robin);
02102   }
02103 
02104   milestones.dns_lookup_end = ink_get_hrtime();
02105 
02106   if (is_debug_tag_set("http_timeout")) {
02107     if (t_state.api_txn_dns_timeout_value != -1) {
02108       int foo = (int) (milestone_difference_msec(milestones.dns_lookup_begin, milestones.dns_lookup_end));
02109       DebugSM("http_timeout", "DNS took: %d msec", foo);
02110     }
02111   }
02112 }
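// ---------------------------------------------------------------------------
// Editor's sketch (not part of HttpSM.cc): process_hostdb_info() above only
// propagates an SRV target's last_failure when that failure is still inside
// the down_server_timeout window.  The test, pulled out into a hypothetical
// helper for clarity (time_t stands in for the internal time type):
// ---------------------------------------------------------------------------
static bool
sketch_failure_is_recent(time_t now, time_t down_server_timeout, uint32_t last_failure)
{
  // A zero last_failure means the target never failed; otherwise the failure
  // only matters if it happened within the last down_server_timeout seconds.
  return last_failure != 0 && (uint32_t) (now - down_server_timeout) < last_failure;
}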
02113 
02114 //////////////////////////////////////////////////////////////////////////////
02115 //
02116 //  HttpSM::state_hostdb_lookup()
02117 //
02118 //////////////////////////////////////////////////////////////////////////////
02119 int
02120 HttpSM::state_hostdb_lookup(int event, void *data)
02121 {
02122   STATE_ENTER(&HttpSM::state_hostdb_lookup, event);
02123 
02124 //    ink_assert (m_origin_server_vc == 0);
02125   // REQ_FLAVOR_SCHEDULED_UPDATE can be transformed into
02126   // REQ_FLAVOR_REVPROXY
02127   ink_assert(t_state.req_flavor == HttpTransact::REQ_FLAVOR_SCHEDULED_UPDATE ||
02128              t_state.req_flavor == HttpTransact::REQ_FLAVOR_REVPROXY || ua_entry->vc != NULL);
02129 
02130   switch (event) {
02131   case EVENT_HOST_DB_LOOKUP:
02132     pending_action = NULL;
02133     process_hostdb_info((HostDBInfo *) data);
02134     call_transact_and_set_next_state(NULL);
02135     break;
02136   case EVENT_SRV_LOOKUP:
02137     {
02138       pending_action = NULL;
02139       process_srv_info((HostDBInfo *) data);
02140 
02141       char *host_name = t_state.dns_info.srv_lookup_success ? t_state.dns_info.srv_hostname : t_state.dns_info.lookup_name;
02142       HostDBProcessor::Options opt;
02143       opt.port = t_state.dns_info.srv_lookup_success ? t_state.dns_info.srv_port : t_state.server_info.port;
02144       opt.flags = (t_state.cache_info.directives.does_client_permit_dns_storing)
02145             ? HostDBProcessor::HOSTDB_DO_NOT_FORCE_DNS
02146             : HostDBProcessor::HOSTDB_FORCE_DNS_RELOAD
02147           ;
02148       opt.timeout = (t_state.api_txn_dns_timeout_value != -1) ? t_state.api_txn_dns_timeout_value : 0;
02149       opt.host_res_style = ua_session->host_res_style;
02150 
02151       Action *dns_lookup_action_handle = hostDBProcessor.getbyname_imm(this,
02152                                                                  (process_hostdb_info_pfn) & HttpSM::
02153                                                                  process_hostdb_info,
02154                                                                  host_name, 0,
02155                                                                  opt);
02156       if (dns_lookup_action_handle != ACTION_RESULT_DONE) {
02157         ink_assert(!pending_action);
02158         pending_action = dns_lookup_action_handle;
02159         historical_action = pending_action;
02160       } else {
02161         call_transact_and_set_next_state(NULL);
02162       }
02163     }
02164     break;
02165   case EVENT_HOST_DB_IP_REMOVED:
02166     ink_assert(!"Unexpected event from HostDB");
02167     break;
02168   default:
02169     ink_assert(!"Unexpected event");
02170   }
02171 
02172   return 0;
02173 }
02174 
02175 int
02176 HttpSM::state_hostdb_reverse_lookup(int event, void *data)
02177 {
02178   STATE_ENTER(&HttpSM::state_hostdb_reverse_lookup, event);
02179 
02180   // REQ_FLAVOR_SCHEDULED_UPDATE can be transformed into
02181   // REQ_FLAVOR_REVPROXY
02182   ink_assert(t_state.req_flavor == HttpTransact::REQ_FLAVOR_SCHEDULED_UPDATE ||
02183              t_state.req_flavor == HttpTransact::REQ_FLAVOR_REVPROXY || ua_entry->vc != NULL);
02184 
02185   switch (event) {
02186   case EVENT_HOST_DB_LOOKUP:
02187     pending_action = NULL;
02188     if (data) {
02189       t_state.request_data.hostname_str = ((HostDBInfo *) data)->hostname();
02190     } else {
02191       DebugSM("http", "[%" PRId64 "] reverse DNS lookup failed for '%s'", sm_id, t_state.dns_info.lookup_name);
02192     }
02193     call_transact_and_set_next_state(NULL);
02194     break;
02195   default:
02196     ink_assert(!"Unexpected event");
02197   }
02198 
02199   return 0;
02200 }
02201 
02202 //////////////////////////////////////////////////////////////////////////////
02203 //
02204 //  HttpSM::state_mark_os_down()
02205 //
02206 //////////////////////////////////////////////////////////////////////////////
02207 int
02208 HttpSM::state_mark_os_down(int event, void *data)
02209 {
02210   HostDBInfo *mark_down = NULL;
02211 
02212   if (event == EVENT_HOST_DB_LOOKUP && data) {
02213     HostDBInfo *r = (HostDBInfo *) data;
02214 
02215     if (r->round_robin) {
02216       // Look for the entry we need to mark down in the round robin
02217       ink_assert(t_state.current.server != NULL);
02218       ink_assert(t_state.current.request_to == HttpTransact::ORIGIN_SERVER);
02219       if (t_state.current.server) {
02220         mark_down = r->rr()->find_ip(&t_state.current.server->addr.sa);
02221       }
02222     } else {
02223       // No longer a round robin, check to see if our address is the same
02224       if (ats_ip_addr_eq(t_state.host_db_info.ip(), r->ip())) {
02225         mark_down = r;
02226       }
02227     }
02228 
02229     if (mark_down) {
02230       mark_host_failure(mark_down, t_state.request_sent_time);
02231     }
02232   }
02233   // We either found our entry or we did not.  Either way find
02234   //  the entry we should use now
02235   return state_hostdb_lookup(event, data);
02236 }
02237 
02238 //////////////////////////////////////////////////////////////////////////
02239 //
02240 //  HttpSM::state_handle_stat_page()
02241 //
02242 //////////////////////////////////////////////////////////////////////////
02243 int
02244 HttpSM::state_handle_stat_page(int event, void *data)
02245 {
02246   STATE_ENTER(&HttpSM::state_handle_stat_page, event);
02247   switch (event) {
02248   case STAT_PAGE_SUCCESS:
02249     pending_action = NULL;
02250 
02251     if (data) {
02252       StatPageData *spd = (StatPageData *) data;
02253 
02254       t_state.internal_msg_buffer = spd->data;
02255       if (spd->type)
02256         t_state.internal_msg_buffer_type = spd->type;
02257       else
02258         t_state.internal_msg_buffer_type = NULL; // Defaults to text/html
02259       t_state.internal_msg_buffer_size = spd->length;
02260       t_state.internal_msg_buffer_fast_allocator_size = -1;
02261     }
02262 
02263     call_transact_and_set_next_state(HttpTransact::HandleStatPage);
02264     break;
02265 
02266   case STAT_PAGE_FAILURE:
02267     pending_action = NULL;
02268     call_transact_and_set_next_state(HttpTransact::HandleStatPage);
02269     break;
02270 
02271   default:
02272     ink_release_assert(0);
02273     break;
02274   }
02275 
02276   return 0;
02277 }
02278 
02279 ///////////////////////////////////////////////////////////////
02280 //
02281 //  HttpSM::state_auth_callback()
02282 //
02283 ///////////////////////////////////////////////////////////////
02284 //int
02285 //HttpSM::state_auth_callback(int event, void *data)
02286 //{
02287  // STATE_ENTER(&HttpSM::state_auth_lookup, event);
02288 
02289   //ink_release_assert(ua_entry != NULL);
02290   //pending_action = NULL;
02291 
02292   //if (event == AUTH_MODULE_EVENT) {
02293    // authAdapter.HandleAuthResponse(event, data);
02294   //} else {
02295    // ink_release_assert(!"Unknown authentication module event");
02296   //}
02297     /************************************************************************\
02298      * pending_action=ACTION_RESULT_DONE only if Authentication step has    *
02299      *                                   been done & authorization is left  *
02300      * pending_action=NULL only if we have to set_next_state.               *
02301      * pending_action=something else. Don't do anything.                    *
02302      *                                One more callback is pending          *
02303     \************************************************************************/
02304 
02305   //if (authAdapter.stateChangeRequired()) {
02306    // set_next_state();
02307   //}
02308 // OLD AND UGLY: if (pending_action == NULL) {
02309 // OLD AND UGLY:        pending_action=NULL;
02310 // OLD AND UGLY:     } else if(pending_action == ACTION_RESULT_DONE) {
02311 // OLD AND UGLY:        pending_action=NULL;
02312 // OLD AND UGLY:     }
02313 
02314   //return EVENT_DONE;
02315 //}
02316 
02317 ///////////////////////////////////////////////////////////////
02318 //
02319 //  HttpSM::state_icp_lookup()
02320 //
02321 ///////////////////////////////////////////////////////////////
02322 int
02323 HttpSM::state_icp_lookup(int event, void *data)
02324 {
02325   STATE_ENTER(&HttpSM::state_icp_lookup, event);
02326 
02327   // ua_entry is NULL for scheduled updates
02328   ink_release_assert(ua_entry != NULL ||
02329                      t_state.req_flavor == HttpTransact::REQ_FLAVOR_SCHEDULED_UPDATE ||
02330                      t_state.req_flavor == HttpTransact::REQ_FLAVOR_REVPROXY);
02331   pending_action = NULL;
02332 
02333   switch (event) {
02334   case ICP_LOOKUP_FOUND:
02335 
02336     DebugSM("http", "ICP says ICP_LOOKUP_FOUND");
02337     t_state.icp_lookup_success = true;
02338     t_state.icp_ip_result = *(struct sockaddr_in *) data;
02339 
02340 /*
02341 *  Disable ICP loop detection since the Cidera network
02342 *    insists on trying to preload the cache from a
02343 *    sibling cache.
02344 *
02345 *  // inhibit bad ICP looping behavior
02346 *  if (t_state.icp_ip_result.sin_addr.s_addr ==
02347 *    t_state.client_info.ip) {
02348 *      DebugSM("http","Loop in ICP config, bypassing...");
02349 *        t_state.icp_lookup_success = false;
02350 *  }
02351 */
02352     break;
02353 
02354   case ICP_LOOKUP_FAILED:
02355     DebugSM("http", "ICP says ICP_LOOKUP_FAILED");
02356     t_state.icp_lookup_success = false;
02357     break;
02358   default:
02359     ink_release_assert(0);
02360     break;
02361   }
02362 
02363   call_transact_and_set_next_state(HttpTransact::HandleICPLookup);
02364 
02365   return 0;
02366 }
02367 
02368 /////////////////////////////////////////////////////////////////////////////////
02369 //  HttpSM::state_cache_open_write()
02370 //
02371 //  This state is set by set_next_state() for a cache open write
02372 //  (SERVER_READ_CACHE_WRITE)
02373 //
02374 //////////////////////////////////////////////////////////////////////////
02375 int
02376 HttpSM::state_cache_open_write(int event, void *data)
02377 {
02378   STATE_ENTER(&HttpSM::state_cache_open_write, event);
02379   milestones.cache_open_write_end = ink_get_hrtime();
02380   pending_action = NULL;
02381 
02382   switch (event) {
02383   case CACHE_EVENT_OPEN_WRITE:
02384     //////////////////////////////
02385     // OPEN WRITE is successful //
02386     //////////////////////////////
02387     t_state.cache_info.write_lock_state = HttpTransact::CACHE_WL_SUCCESS;
02388     break;
02389 
02390   case CACHE_EVENT_OPEN_WRITE_FAILED: 
02391     // Failed on the write lock and retrying the vector
02392     //  for reading
02393     t_state.cache_info.write_lock_state = HttpTransact::CACHE_WL_FAIL;
02394     break;
02395 
02396   case CACHE_EVENT_OPEN_READ:
02397     // The write vector was locked and the cache_sm retried
02398     // and got the read vector again.
02399     cache_sm.cache_read_vc->get_http_info(&t_state.cache_info.object_read);
02400     t_state.cache_info.is_ram_cache_hit = (cache_sm.cache_read_vc)->is_ram_cache_hit();
02401 
02402     ink_assert(t_state.cache_info.object_read != 0);
02403     t_state.source = HttpTransact::SOURCE_CACHE;
02404     // clear up CACHE_LOOKUP_MISS, let Freshness function decide
02405     // hit status
02406     t_state.cache_lookup_result = HttpTransact::CACHE_LOOKUP_NONE;
02407     t_state.cache_info.write_lock_state = HttpTransact::CACHE_WL_READ_RETRY;
02408     break;
02409 
02410   case HTTP_TUNNEL_EVENT_DONE:
02411     // In the case where we have issued a cache write for the
02412     //  transformed copy, the tunnel from the origin server to
02413     //  the transform may complete while we are waiting for
02414     //  the cache write.  If this is the case, forward the event
02415     //  to the transform read state as it will know how to
02416     //  handle it
02417     if (t_state.next_action == HttpTransact::SM_ACTION_CACHE_ISSUE_WRITE_TRANSFORM) {
02418       state_common_wait_for_transform_read(&transform_info, &HttpSM::tunnel_handler, event, data);
02419 
02420       return 0;
02421     }
02422     // Fallthrough
02423   default:
02424     ink_release_assert(0);
02425   }
02426 
02427   if (t_state.api_lock_url != HttpTransact::LOCK_URL_FIRST) {
02428     if (event == CACHE_EVENT_OPEN_WRITE || event == CACHE_EVENT_OPEN_WRITE_FAILED) {
02429       if (t_state.api_lock_url == HttpTransact::LOCK_URL_SECOND) {
02430         t_state.api_lock_url = HttpTransact::LOCK_URL_ORIGINAL;
02431         do_cache_prepare_action(second_cache_sm, t_state.cache_info.second_object_read, true);
02432         return 0;
02433       } else {
02434         t_state.api_lock_url = HttpTransact::LOCK_URL_DONE;
02435       }
02436     } else if (event != CACHE_EVENT_OPEN_READ || t_state.api_lock_url != HttpTransact::LOCK_URL_SECOND)
02437       t_state.api_lock_url = HttpTransact::LOCK_URL_QUIT;
02438   }
02439   // The write either succeeded or failed, notify transact
02440   call_transact_and_set_next_state(NULL);
02441 
02442   return 0;
02443 }
02444 
02445 inline void
02446 HttpSM::setup_cache_lookup_complete_api()
02447 {
02448   t_state.api_next_action = HttpTransact::SM_ACTION_API_CACHE_LOOKUP_COMPLETE;
02449   do_api_callout();
02450 }
02451 
02452 //////////////////////////////////////////////////////////////////////////
02453 //
02454 //  HttpSM::state_cache_open_read()
02455 //
02456 //  This state handles the result of CacheProcessor::open_read()
02457 //  that attempts to do cache lookup and open a particular cached
02458 //  object for reading.
02459 //
02460 //////////////////////////////////////////////////////////////////////////
02461 int
02462 HttpSM::state_cache_open_read(int event, void *data)
02463 {
02464   STATE_ENTER(&HttpSM::state_cache_open_read, event);
02465   milestones.cache_open_read_end = ink_get_hrtime();
02466 
02467   ink_assert(server_entry == NULL);
02468   ink_assert(t_state.cache_info.object_read == 0);
02469 
02470   switch (event) {
02471   case CACHE_EVENT_OPEN_READ:
02472     {
02473       pending_action = NULL;
02474 
02475       DebugSM("http", "[%" PRId64 "] cache_open_read - CACHE_EVENT_OPEN_READ", sm_id);
02476 
02477       /////////////////////////////////
02478       // lookup/open is successful. //
02479       /////////////////////////////////
02480       ink_assert(cache_sm.cache_read_vc != NULL);
02481       t_state.source = HttpTransact::SOURCE_CACHE;
02482 
02483       cache_sm.cache_read_vc->get_http_info(&t_state.cache_info.object_read);
02484       t_state.cache_info.is_ram_cache_hit = (cache_sm.cache_read_vc)->is_ram_cache_hit();
02485 
02486       ink_assert(t_state.cache_info.object_read != 0);
02487       call_transact_and_set_next_state(HttpTransact::HandleCacheOpenRead);
02488       break;
02489     }
02490   case CACHE_EVENT_OPEN_READ_FAILED:
02491     pending_action = NULL;
02492 
02493     DebugSM("http", "[%" PRId64 "] cache_open_read - " "CACHE_EVENT_OPEN_READ_FAILED", sm_id);
02494     DebugSM("http", "[state_cache_open_read] open read failed.");
02495     // Inform HttpTransact somebody else is updating the document
02496     // HttpCacheSM already waited so transact should go ahead.
02497     if (data == (void *) -ECACHE_DOC_BUSY)
02498       t_state.cache_lookup_result = HttpTransact::CACHE_LOOKUP_DOC_BUSY;
02499     else
02500       t_state.cache_lookup_result = HttpTransact::CACHE_LOOKUP_MISS;
02501 
02502     ink_assert(t_state.transact_return_point == NULL);
02503     t_state.transact_return_point = HttpTransact::HandleCacheOpenRead;
02504     setup_cache_lookup_complete_api();
02505     break;
02506 
02507   default:
02508     ink_release_assert(!"Unknown event");
02509     break;
02510   }
02511 
02512   return 0;
02513 }
02514 
02515 int
02516 HttpSM::main_handler(int event, void *data)
02517 {
02518   ink_release_assert(magic == HTTP_SM_MAGIC_ALIVE);
02519 
02520   HttpSMHandler jump_point = NULL;
02521   ink_assert(reentrancy_count >= 0);
02522   reentrancy_count++;
02523 
02524   // Don't use the state enter macro since it uses history
02525   //  space that we don't care about
02526   DebugSM("http", "[%" PRId64 "] [HttpSM::main_handler, %s]", sm_id, HttpDebugNames::get_event_name(event));
02527 
02528   HttpVCTableEntry *vc_entry = NULL;
02529 
02530   if (data != NULL) {
02531     // Only search the VC table if the event could have to
02532     //  do with a VIO to save a few cycles
02533 
02534     if (event < VC_EVENT_EVENTS_START + 100) {
02535       vc_entry = vc_table.find_entry((VIO *) data);
02536     }
02537   }
02538 
02539   if (vc_entry) {
02540     jump_point = vc_entry->vc_handler;
02541     ink_assert(jump_point != (HttpSMHandler)NULL);
02542     ink_assert(vc_entry->vc != (VConnection *)NULL);
02543     (this->*jump_point) (event, data);
02544   } else {
02545     ink_assert(default_handler != (HttpSMHandler)NULL);
02546     (this->*default_handler) (event, data);
02547   }
02548 
02549   // The sub-handler signals when it is time for the state
02550   //  machine to exit.  We can only exit if we are not reentrantly
02551   //  called; otherwise, when our call unwinds, we will be
02552   //  running on a dead state machine
02553   //
02554   // Because of the need for an api shutdown hook, kill_this()
02555   // is also reentrant.  As such, we don't want to decrement
02556   // the reentrancy count until after we run kill_this()
02557   //
02558   if (terminate_sm == true && reentrancy_count == 1) {
02559     kill_this();
02560   } else {
02561     reentrancy_count--;
02562     ink_assert(reentrancy_count >= 0);
02563   }
02564 
02565   return (VC_EVENT_CONT);
02566 
02567 }
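// ---------------------------------------------------------------------------
// Editor's sketch (not part of HttpSM.cc): the reentrancy guard in
// main_handler() above, shown in isolation.  kill_this() may only run when
// the outermost handler invocation is unwinding; a nested (reentrant) call
// must just decrement the count and return, or it would unwind into a freed
// state machine.  Generic pattern with hypothetical names:
// ---------------------------------------------------------------------------
struct SketchReentrantSM {
  int  reentrancy_count;
  bool terminate_sm;

  void handle_event()
  {
    ++reentrancy_count;

    // ... dispatch to a sub-handler here; it may set terminate_sm and may
    //     re-enter handle_event() on this same object ...

    if (terminate_sm && reentrancy_count == 1) {
      // destroy();   // safe: no outer frame still references *this
    } else {
      --reentrancy_count;
    }
  }
};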
02568 
02569 // void HttpSM::tunnel_handler_post_or_put()
02570 //
02571 //   Handles the common cleanup tasks for Http post/put
02572 //   to prevent code duplication
02573 //
02574 void
02575 HttpSM::tunnel_handler_post_or_put(HttpTunnelProducer * p)
02576 {
02577   ink_assert(p->vc_type == HT_HTTP_CLIENT);
02578   HttpTunnelConsumer *c;
02579 
02580   // If there is a post transform, remove its entry from the State
02581   //  Machine's VC table
02582   //
02583   // MUST NOT clear the vc pointer from post_transform_info
02584   //    as this causes a double close of the transform vc in transform_cleanup
02585   //
02586   if (post_transform_info.vc != NULL) {
02587     ink_assert(post_transform_info.entry->in_tunnel == true);
02588     ink_assert(post_transform_info.vc == post_transform_info.entry->vc);
02589     vc_table.cleanup_entry(post_transform_info.entry);
02590     post_transform_info.entry = NULL;
02591   }
02592 
02593   switch (p->handler_state) {
02594   case HTTP_SM_POST_SERVER_FAIL:
02595     c = tunnel.get_consumer(server_entry->vc);
02596     ink_assert(c->write_success == false);
02597     break;
02598   case HTTP_SM_POST_UA_FAIL:
02599     // UA quit - shutdown the SM
02600     ink_assert(p->read_success == false);
02601     terminate_sm = true;
02602     break;
02603   case HTTP_SM_POST_SUCCESS:
02604     // The post succeeded
02605     ink_assert(p->read_success == true);
02606     ink_assert(p->consumer_list.head->write_success == true);
02607     tunnel.deallocate_buffers();
02608     tunnel.reset();
02609     // When the ua completed sending its data we must have
02610     //  removed it from the tunnel
02611     ink_release_assert(ua_entry->in_tunnel == false);
02612     server_entry->in_tunnel = false;
02613 
02614     break;
02615   default:
02616     ink_release_assert(0);
02617   }
02618 }
02619 
02620 // int HttpSM::tunnel_handler_post(int event, void* data)
02621 //
02622 //   Handles completion of any http request body tunnel
02623 //     Having 'post' in its name is a misnomer
02624 //
02625 int
02626 HttpSM::tunnel_handler_post(int event, void *data)
02627 {
02628   STATE_ENTER(&HttpSM::tunnel_handler_post, event);
02629 
02630   ink_assert(event == HTTP_TUNNEL_EVENT_DONE);
02631   ink_assert(data == &tunnel);
02632   // The tunnel calls this when it is done
02633 
02634   HttpTunnelProducer *p = tunnel.get_producer(ua_session);
02635   int p_handler_state = p->handler_state;
02636   tunnel_handler_post_or_put(p);
02637 
02638   switch (p_handler_state) {
02639   case HTTP_SM_POST_SERVER_FAIL:
02640     handle_post_failure();
02641     break;
02642   case HTTP_SM_POST_UA_FAIL:
02643     break;
02644   case HTTP_SM_POST_SUCCESS:
02645     // It's time to start reading the response
02646     setup_server_read_response_header();
02647     break;
02648   default:
02649     ink_release_assert(0);
02650   }
02651 
02652   return 0;
02653 }
02654 
02655 int
02656 HttpSM::tunnel_handler_cache_fill(int event, void *data)
02657 {
02658   STATE_ENTER(&HttpSM::tunnel_handler_cache_fill, event);
02659 
02660   ink_assert(event == HTTP_TUNNEL_EVENT_DONE);
02661   ink_assert(data == &tunnel);
02662 
02663   ink_release_assert(cache_sm.cache_write_vc);
02664 
02665   tunnel.deallocate_buffers();
02666   tunnel.deallocate_redirect_postdata_buffers();
02667   tunnel.reset();
02668 
02669   setup_server_transfer_to_cache_only();
02670   tunnel.tunnel_run();
02671 
02672   return 0;
02673 }
02674 
02675 int
02676 HttpSM::tunnel_handler_100_continue(int event, void *data)
02677 {
02678   STATE_ENTER(&HttpSM::tunnel_handler_100_continue, event);
02679 
02680   ink_assert(event == HTTP_TUNNEL_EVENT_DONE);
02681   ink_assert(data == &tunnel);
02682 
02683   // We're done sending the 100 continue.  If we succeeded,
02684   //   we set up to parse the next server response.  If we
02685   //   failed, shut down the state machine
02686   HttpTunnelConsumer *c = tunnel.get_consumer(ua_session);
02687 
02688   if (c->write_success) {
02689     // Note: we must use destroy() here since clear()
02690     //  does not free the memory from the header
02691     t_state.hdr_info.client_response.destroy();
02692     tunnel.deallocate_buffers();
02693     tunnel.deallocate_redirect_postdata_buffers();
02694     tunnel.reset();
02695 
02696     if (server_entry->eos) {
02697       // if the server closed while sending the
02698       //    100 continue header, handle it here so we
02699       //    don't assert later
02700       DebugSM("http", "[%" PRId64 "] tunnel_handler_100_continue - server already " "closed, terminating connection", sm_id);
02701 
02702       // Since 100 isn't a final (loggable) response header
02703       //   kill the 100 continue header and create an empty one
02704       t_state.hdr_info.server_response.destroy();
02705       t_state.hdr_info.server_response.create(HTTP_TYPE_RESPONSE);
02706       handle_server_setup_error(VC_EVENT_EOS, server_entry->read_vio);
02707     } else {
02708       setup_server_read_response_header();
02709     }
02710   } else {
02711     terminate_sm = true;
02712   }
02713 
02714   return 0;
02715 }
02716 
02717 int
02718 HttpSM::tunnel_handler_push(int event, void *data)
02719 {
02720   STATE_ENTER(&HttpSM::tunnel_handler_push, event);
02721 
02722   ink_assert(event == HTTP_TUNNEL_EVENT_DONE);
02723   ink_assert(data == &tunnel);
02724 
02725   // Check to see if the client is still around
02726   HttpTunnelProducer *ua = tunnel.get_producer(ua_session);
02727 
02728   if (!ua->read_success) {
02729     // Client failed to send the body, it's gone.  Kill the
02730     // state machine
02731     terminate_sm = true;
02732     return 0;
02733   }
02734 
02735   HttpTunnelConsumer *cache = ua->consumer_list.head;
02736   ink_release_assert(cache->vc_type == HT_CACHE_WRITE);
02737   bool cache_write_success = cache->write_success;
02738 
02739   // Reset tunneling state since we need to send a response
02740   //  to the client as to whether we succeeded
02741   tunnel.deallocate_buffers();
02742   tunnel.deallocate_redirect_postdata_buffers();
02743   tunnel.reset();
02744 
02745   if (cache_write_success) {
02746     call_transact_and_set_next_state(HttpTransact::HandlePushTunnelSuccess);
02747   } else {
02748     call_transact_and_set_next_state(HttpTransact::HandlePushTunnelFailure);
02749   }
02750 
02751   return 0;
02752 }
02753 
02754 int
02755 HttpSM::tunnel_handler(int event, void *data)
02756 {
02757   STATE_ENTER(&HttpSM::tunnel_handler, event);
02758 
02759   ink_assert(event == HTTP_TUNNEL_EVENT_DONE);
02760   ink_assert(data == &tunnel);
02761   // The tunnel calls this when it is done
02762   terminate_sm = true;
02763 
02764   if (unlikely(t_state.is_websocket)) {
02765     HTTP_DECREMENT_DYN_STAT(http_websocket_current_active_client_connections_stat);
02766   }
02767 
02768   return 0;
02769 }
02770 
02771 
02772 
02773 /****************************************************
02774    TUNNELLING HANDLERS
02775    ******************************************************/
02776 
02777 bool
02778 HttpSM::is_http_server_eos_truncation(HttpTunnelProducer * p)
02779 {
02780 
02781   if ((p->do_dechunking || p->do_chunked_passthru) && p->chunked_handler.truncation) {
02782     return true;
02783   }
02784 
02785   //////////////////////////////////////////////////////////////
02786   // If we did not get or did not trust the origin server's   //
02787   //  content-length, read_content_length is unset.  The      //
02788   //  only way the end of the document is signaled is the     //
02789   //  origin server closing the connection.  However, we      //
02790   //  need to protect against the document getting truncated  //
02791   //  because the origin server crashed.  The following       //
02792   //  table outlines when we mark the server read as failed   //
02793   //                                                          //
02794   //    No C-L                :  read success                 //
02795   //    Received bytes < C-L  :  read failed (=> Cache Abort) //
02796   //    Received bytes == C-L :  read success                 //
02797   //    Received bytes > C-L  :  read success                 //
02798   //////////////////////////////////////////////////////////////
02799   int64_t cl = t_state.hdr_info.server_response.get_content_length();
02800 
02801   if (cl != UNDEFINED_COUNT && cl > server_response_body_bytes) {
02802     DebugSM("http", "[%" PRId64 "] server eos after %" PRId64 ".  Expected %" PRId64, sm_id, server_response_body_bytes, cl);
02803     return true;
02804   } else {
02805     return false;
02806   }
02807 }
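// ---------------------------------------------------------------------------
// Editor's sketch (not part of HttpSM.cc): the Content-Length decision table
// documented in is_http_server_eos_truncation() above, as a hypothetical
// stand-alone predicate.  undefined_count stands in for UNDEFINED_COUNT.
// ---------------------------------------------------------------------------
static bool
sketch_eos_is_truncation(int64_t content_length, int64_t body_bytes_received, int64_t undefined_count)
{
  if (content_length == undefined_count) {
    return false;                               // no (trusted) C-L: the close ends the document
  }
  return body_bytes_received < content_length;  // short body => truncated (cache abort)
}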
02808 
02809 int
02810 HttpSM::tunnel_handler_server(int event, HttpTunnelProducer * p)
02811 {
02812   STATE_ENTER(&HttpSM::tunnel_handler_server, event);
02813 
02814   milestones.server_close = ink_get_hrtime();
02815 
02816   bool close_connection = false;
02817 
02818   if (t_state.current.server->keep_alive == HTTP_KEEPALIVE &&
02819       server_entry->eos == false && plugin_tunnel_type == HTTP_NO_PLUGIN_TUNNEL) {
02820     close_connection = false;
02821   } else {
02822     close_connection = true;
02823   }
02824 
02825   switch (event) {
02826   case VC_EVENT_INACTIVITY_TIMEOUT:
02827   case VC_EVENT_ACTIVE_TIMEOUT:
02828   case VC_EVENT_ERROR:
02829     t_state.squid_codes.log_code = SQUID_LOG_ERR_READ_TIMEOUT;
02830     t_state.squid_codes.hier_code = SQUID_HIER_TIMEOUT_DIRECT;
02831     /* fallthru */
02832 
02833   case VC_EVENT_EOS:
02834 
02835     switch (event) {
02836     case VC_EVENT_INACTIVITY_TIMEOUT:
02837       t_state.current.server->state = HttpTransact::INACTIVE_TIMEOUT;
02838       break;
02839     case VC_EVENT_ACTIVE_TIMEOUT:
02840       t_state.current.server->state = HttpTransact::ACTIVE_TIMEOUT;
02841       break;
02842     case VC_EVENT_ERROR:
02843       t_state.current.server->state = HttpTransact::CONNECTION_ERROR;
02844       break;
02845     case VC_EVENT_EOS:
02846       t_state.current.server->state = HttpTransact::TRANSACTION_COMPLETE;
02847       break;
02848     }
02849 
02850     close_connection = true;
02851 
02852     ink_assert(p->vc_type == HT_HTTP_SERVER);
02853 
02854     if (is_http_server_eos_truncation(p)) {
02855       DebugSM("http", "[%" PRId64 "] [HttpSM::tunnel_handler_server] aborting HTTP tunnel due to server truncation", sm_id);
02856       tunnel.chain_abort_all(p);
02857       t_state.current.server->abort = HttpTransact::ABORTED;
02858       t_state.client_info.keep_alive = HTTP_NO_KEEPALIVE;
02859       t_state.current.server->keep_alive = HTTP_NO_KEEPALIVE;
02860       t_state.squid_codes.log_code = SQUID_LOG_ERR_READ_ERROR;
02861     } else {
02862       DebugSM("http", "[%" PRId64 "] [HttpSM::tunnel_handler_server] finishing HTTP tunnel", sm_id);
02863       p->read_success = true;
02864       t_state.current.server->abort = HttpTransact::DIDNOT_ABORT;
02865       // Appending reason to a response without Content-Length will result in
02866       // the reason string being written to the client and a bad CL when reading from cache.
02867       // I didn't find anywhere this appended reason is being used, so commenting it out.
02868       /*
02869         if (t_state.negative_caching && p->bytes_read == 0) {
02870         int reason_len;
02871         const char *reason = t_state.hdr_info.server_response.reason_get(&reason_len);
02872         if (reason == NULL)
02873         tunnel.append_message_to_producer_buffer(p, "Negative Response", sizeof("Negative Response") - 1);
02874         else
02875         tunnel.append_message_to_producer_buffer(p, reason, reason_len);
02876         }
02877       */
02878       tunnel.local_finish_all(p);
02879     }
02880     break;
02881 
02882   case HTTP_TUNNEL_EVENT_PRECOMPLETE:
02883   case VC_EVENT_READ_COMPLETE:
02884     //
02885     // The transfer completed successfully
02886     //    If there is still data in the buffer, the server
02887     //    sent too much, indicating a failed transfer
02888     p->read_success = true;
02889     t_state.current.server->state = HttpTransact::TRANSACTION_COMPLETE;
02890     t_state.current.server->abort = HttpTransact::DIDNOT_ABORT;
02891 
02892     if (p->do_dechunking || p->do_chunked_passthru) {
02893       if (p->chunked_handler.truncation) {
02894         tunnel.abort_cache_write_finish_others(p);
02895         // We couldn't read all chunks successfully:
02896         // Disable keep-alive.
02897         t_state.client_info.keep_alive = HTTP_NO_KEEPALIVE;
02898         t_state.current.server->keep_alive = HTTP_NO_KEEPALIVE;
02899       } else {
02900         tunnel.local_finish_all(p);
02901       }
02902     }
02903     break;
02904 
02905   case HTTP_TUNNEL_EVENT_CONSUMER_DETACH:
02906     // All consumers are prematurely gone.  Shut down
02907     //    the server connection
02908     p->read_success = true;
02909     t_state.current.server->state = HttpTransact::TRANSACTION_COMPLETE;
02910     t_state.current.server->abort = HttpTransact::DIDNOT_ABORT;
02911     close_connection = true;
02912     break;
02913 
02914   case VC_EVENT_READ_READY:
02915   case VC_EVENT_WRITE_READY:
02916   case VC_EVENT_WRITE_COMPLETE:
02917   default:
02918     // None of these events should ever come our way
02919     ink_assert(0);
02920     break;
02921   }
02922 
02923   // turn off negative caching in case there are multiple server contacts
02924   if (t_state.negative_caching)
02925     t_state.negative_caching = false;
02926 
02927   // If we had a background fill, update our status
02928   if (background_fill == BACKGROUND_FILL_STARTED) {
02929     background_fill = p->read_success ? BACKGROUND_FILL_COMPLETED : BACKGROUND_FILL_ABORTED;
02930     HTTP_DECREMENT_DYN_STAT(http_background_fill_current_count_stat);
02931   }
02932   // We handled the event.  Now either shut down the connection or
02933   //   set it up for keep-alive
02934   ink_assert(server_entry->vc == p->vc);
02935   ink_assert(p->vc_type == HT_HTTP_SERVER);
02936   ink_assert(p->vc == server_session);
02937 
02938   if (close_connection) {
02939     p->vc->do_io_close();
02940     p->read_vio = NULL;
02941     /* TS-1424: if we're outbound transparent and using the client
02942        source port for the outbound connection we must effectively
02943        propagate server closes back to the client. Part of that is
02944        disabling KeepAlive if the server closes.
02945     */
02946     if (ua_session && ua_session->f_outbound_transparent && t_state.http_config_param->use_client_source_port) {
02947       t_state.client_info.keep_alive = HTTP_NO_KEEPALIVE;
02948     }
02949   } else {
02950     server_session->attach_hostname(t_state.current.server->name);
02951     server_session->server_trans_stat--;
02952     HTTP_DECREMENT_DYN_STAT(http_current_server_transactions_stat);
02953 
02954     // If the option to attach the server session to the client session is set
02955     // and if the client is still around and the client is keep-alive, attach the
02956     // server session so the next keep-alive request can use it.  Server sessions will
02957     // be placed into the shared pool if the next incoming request is for a different
02958     // origin server
02959     if (t_state.http_config_param->attach_server_session_to_client == 1 &&
02960         ua_session && t_state.client_info.keep_alive == HTTP_KEEPALIVE) {
02961       Debug("http", "attaching server session to the client");
02962       ua_session->attach_server_session(server_session);
02963     } else {
02964       // Release the session back into the shared session pool
02965       server_session->release();
02966     }
02967   }
02968 
02969   return 0;
02970 }
02971 
02972 // int HttpSM::tunnel_handler_100_continue_ua(int event, HttpTunnelConsumer* c)
02973 //
02974 //     Used for tunneling the 100 continue response.  The tunnel
02975 //       should not close or release the user agent unless there is
02976 //       an error since the real response is yet to come
02977 //
02978 int
02979 HttpSM::tunnel_handler_100_continue_ua(int event, HttpTunnelConsumer * c)
02980 {
02981   STATE_ENTER(&HttpSM::tunnel_handler_100_continue_ua, event);
02982 
02983   ink_assert(c->vc == ua_session);
02984 
02985   switch (event) {
02986   case VC_EVENT_EOS:
02987     ua_entry->eos = true;
02988     // FALL-THROUGH
02989   case VC_EVENT_INACTIVITY_TIMEOUT:
02990   case VC_EVENT_ACTIVE_TIMEOUT:
02991   case VC_EVENT_ERROR:
02992     set_ua_abort(HttpTransact::ABORTED, event);
02993     c->vc->do_io_close();
02994     break;
02995   case VC_EVENT_WRITE_COMPLETE:
02996     // mark the vc as no longer in the tunnel
02997     //   so we don't get hosed if the ua aborts before
02998     //   the real response header is received
02999     ua_entry->in_tunnel = false;
03000     c->write_success = true;
03001   }
03002 
03003   return 0;
03004 }
03005 
03006 bool
03007 HttpSM::is_bg_fill_necessary(HttpTunnelConsumer * c)
03008 {
03009   ink_assert(c->vc_type == HT_HTTP_CLIENT);
03010 
03011   if (c->producer->alive && // something there to read
03012       server_entry && server_entry->vc && // from an origin server
03013       c->producer->num_consumers > 1  // with someone else reading it
03014     ) {
03015 
03016     // If threshold is 0.0 or negative then do background
03017     //   fill regardless of the content length.  Since this
03018     //   is floating point just make sure the number is near zero
03019     if (t_state.txn_conf->background_fill_threshold <= 0.001) {
03020       return true;
03021     }
03022 
03023     int64_t ua_cl = t_state.hdr_info.client_response.get_content_length();
03024 
03025     if (ua_cl > 0) {
03026       int64_t ua_body_done = c->bytes_written - client_response_hdr_bytes;
03027       float pDone = (float) ua_body_done / ua_cl;
03028 
03029       // If we got a good content length, check to make sure that we haven't already
03030       //  done more than the content length since that would indicate the content-length
03031       //  is bogus.  If we've done more than the threshold, continue the background fill
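      // For example (hypothetical numbers): with a 10 MB Content-Length, 6 MB of
      // body already written to the client, and a configured threshold of 0.5,
      // pDone = 6/10 = 0.6, which is <= 1.0 and > 0.5, so the fill continues in
      // the background after the client goes away.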
03032       if (pDone <= 1.0 && pDone > t_state.txn_conf->background_fill_threshold) {
03033         return true;
03034       } else {
03035         DebugSM("http", "[%" PRId64 "] no background.  Only %%%f of %%%f done [%" PRId64 " / %" PRId64" ]", sm_id, pDone, t_state.txn_conf->background_fill_threshold, ua_body_done, ua_cl);
03036       }
03037 
03038     }
03039   }
03040 
03041   return false;
03042 }
03043 
03044 int
03045 HttpSM::tunnel_handler_ua(int event, HttpTunnelConsumer * c)
03046 {
03047   bool close_connection = true;
03048   HttpTunnelProducer *p = NULL;
03049   HttpTunnelConsumer *selfc = NULL;
03050 
03051   STATE_ENTER(&HttpSM::tunnel_handler_ua, event);
03052   ink_assert(c->vc == ua_session);
03053   milestones.ua_close = ink_get_hrtime();
03054 
03055   switch (event) {
03056   case VC_EVENT_EOS:
03057     ua_entry->eos = true;
03058     // FALL-THROUGH
03059   case VC_EVENT_INACTIVITY_TIMEOUT:
03060   case VC_EVENT_ACTIVE_TIMEOUT:
03061   case VC_EVENT_ERROR:
03062 
03063     // The user agent died or aborted.  Check to
03064     //  see if we should set up a background fill
03065     set_ua_abort(HttpTransact::ABORTED, event);
03066 
03067     if (is_bg_fill_necessary(c)) {
03068       DebugSM("http", "[%" PRId64 "] Initiating background fill", sm_id);
03069       background_fill = BACKGROUND_FILL_STARTED;
03070       HTTP_INCREMENT_DYN_STAT(http_background_fill_current_count_stat);
03071 
03072       // There is another consumer (cache write) so
03073       //  detach the user agent
03074       ink_assert(server_entry->vc == server_session);
03075       ink_assert(c->is_downstream_from(server_session));
03076       server_session->get_netvc()->
03077         set_active_timeout(HRTIME_SECONDS(t_state.txn_conf->background_fill_active_timeout));
03078     } else {
03079       // No background fill
03080       p = c->producer;
03081       tunnel.chain_abort_all(c->producer);
03082       selfc = p->self_consumer;
03083       if (selfc) {
03084         // This is the case where there is a transformation between ua and os
03085         p = selfc->producer;
03086         // if the producer is the cache or OS, close the producer.
03087         // Otherwise, in the case of large docs, the producer's iobuffer fills up
03088         // waiting for a consumer to consume data and the connection is never closed.
03089         if (p->alive &&
03090             ((p->vc_type == HT_CACHE_READ) || (p->vc_type == HT_HTTP_SERVER))) {
03091           tunnel.chain_abort_all(p);
03092         }
03093       }
03094     }
03095     break;
03096 
03097   case VC_EVENT_WRITE_COMPLETE:
03098     c->write_success = true;
03099     t_state.client_info.abort = HttpTransact::DIDNOT_ABORT;
03100     if (t_state.client_info.keep_alive == HTTP_KEEPALIVE) {
03101       if (t_state.www_auth_content != HttpTransact::CACHE_AUTH_SERVE || ua_session->get_bound_ss()) {
03102         // successful keep-alive
03103         close_connection = false;
03104       }
03105       // else { the authenticated server connection (cache
03106       // authenticated feature) closed during the serve-from-cache.
03107       // We want the client to issue a new connection for the
03108       // session-based authentication mechanism like NTLM, instead
03109       // of still using the existing client connection. }
03110     }
03111     break;
03112   case VC_EVENT_WRITE_READY:
03113   case VC_EVENT_READ_READY:
03114   case VC_EVENT_READ_COMPLETE:
03115   default:
03116     // None of these events should ever come our way
03117     ink_assert(0);
03118     break;
03119   }
03120 
03121   client_response_body_bytes = c->bytes_written - client_response_hdr_bytes;
03122 
03123   if (client_response_body_bytes < 0)
03124     client_response_body_bytes = 0;
03125 
03126   // attribute the size written to the client from various sources
03127   // NOTE: responses that go through a range transform are attributed
03128   // to their original sources
03129   // all other transforms attribute the total number of input bytes
03130   // to a source in HttpSM::tunnel_handler_transform_write
03131   //
03132   HttpTransact::Source_t original_source = t_state.source;
03133   if (HttpTransact::SOURCE_TRANSFORM == original_source &&
03134       t_state.range_setup != HttpTransact::RANGE_NONE) {
03135     original_source = t_state.pre_transform_source;
03136   }
03137 
03138   switch (original_source) {
03139   case HttpTransact::SOURCE_HTTP_ORIGIN_SERVER:
03140     server_response_body_bytes = client_response_body_bytes;
03141     break;
03142   case HttpTransact::SOURCE_CACHE:
03143     cache_response_body_bytes = client_response_body_bytes;
03144     break;
03145   default:
03146     break;
03147   }
03148 
03149   ink_assert(ua_entry->vc == c->vc);
03150   if (close_connection) {
03151     // If the client could be pipelining or is doing a POST, we need to
03152     //   set the ua_session into half close mode
03153     if ((t_state.method == HTTP_WKSIDX_POST || t_state.client_info.pipeline_possible == true)
03154         && event == VC_EVENT_WRITE_COMPLETE) {
03155       ua_session->set_half_close_flag();
03156     }
03157 
03158     ua_session->do_io_close();
03159     ua_session = NULL;
03160   } else {
03161     ink_assert(ua_buffer_reader != NULL);
03162     ua_session->release(ua_buffer_reader);
03163     ua_buffer_reader = NULL;
03164     ua_session = NULL;
03165   }
03166 
03167   return 0;
03168 }
03169 
03170 int
03171 HttpSM::tunnel_handler_ua_push(int event, HttpTunnelProducer * p)
03172 {
03173   STATE_ENTER(&HttpSM::tunnel_handler_ua_push, event);
03174 
03175   pushed_response_body_bytes += p->bytes_read;
03176   client_request_body_bytes += p->bytes_read;
03177 
03178   switch (event) {
03179   case VC_EVENT_INACTIVITY_TIMEOUT:
03180   case VC_EVENT_ACTIVE_TIMEOUT:
03181   case VC_EVENT_ERROR:
03182   case VC_EVENT_EOS:
03183     // Transfer terminated.  Bail on the cache write.
03184     t_state.client_info.abort = HttpTransact::ABORTED;
03185     p->vc->do_io_close(EHTTP_ERROR);
03186     p->read_vio = NULL;
03187     tunnel.chain_abort_all(p);
03188     break;
03189 
03190   case HTTP_TUNNEL_EVENT_PRECOMPLETE:
03191   case VC_EVENT_READ_COMPLETE:
03192     //
03193     // The transfer completed successfully
03194     p->read_success = true;
03195     ua_entry->in_tunnel = false;
03196     break;
03197 
03198   case VC_EVENT_READ_READY:
03199   case VC_EVENT_WRITE_READY:
03200   case VC_EVENT_WRITE_COMPLETE:
03201   default:
03202     // None of these events should ever come our way
03203     ink_assert(0);
03204     break;
03205   }
03206 
03207   return 0;
03208 }
03209 
03210 int
03211 HttpSM::tunnel_handler_cache_read(int event, HttpTunnelProducer * p)
03212 {
03213   STATE_ENTER(&HttpSM::tunnel_handler_cache_read, event);
03214 
03215   switch (event) {
03216   case VC_EVENT_ERROR:
03217   case VC_EVENT_EOS:
03218     ink_assert(t_state.cache_info.object_read->valid());
03219     if (t_state.cache_info.object_read->object_size_get() != INT64_MAX || event == VC_EVENT_ERROR) {
03220       // Abnormal termination
03221       t_state.squid_codes.log_code = SQUID_LOG_TCP_SWAPFAIL;
03222       p->vc->do_io_close(EHTTP_ERROR);
03223       p->read_vio = NULL;
03224       tunnel.chain_abort_all(p);
03225       HTTP_INCREMENT_TRANS_STAT(http_cache_read_errors);
03226       break;
03227     } else {
03228       tunnel.local_finish_all(p);
03229       // fall through for the case INT64_MAX read with VC_EVENT_EOS
03230       // callback (read successful)
03231     }
03232   case VC_EVENT_READ_COMPLETE:
03233   case HTTP_TUNNEL_EVENT_PRECOMPLETE:
03234   case HTTP_TUNNEL_EVENT_CONSUMER_DETACH:
03235     p->read_success = true;
03236     p->vc->do_io_close();
03237     p->read_vio = NULL;
03238     break;
03239   default:
03240     ink_release_assert(0);
03241     break;
03242   }
03243 
03244   HTTP_DECREMENT_DYN_STAT(http_current_cache_connections_stat);
03245   return 0;
03246 }
03247 
03248 
03249 int
03250 HttpSM::tunnel_handler_cache_write(int event, HttpTunnelConsumer * c)
03251 {
03252   STATE_ENTER(&HttpSM::tunnel_handler_cache_write, event);
03253 
03254   HttpTransact::CacheWriteStatus_t * status_ptr =
03255     (c->producer->vc_type == HT_TRANSFORM) ?
03256     &t_state.cache_info.transform_write_status : &t_state.cache_info.write_status;
03257 
03258   switch (event) {
03259   case VC_EVENT_ERROR:
03260   case VC_EVENT_EOS:
03261     // Abnormal termination
03262     *status_ptr = HttpTransact::CACHE_WRITE_ERROR;
03263     c->write_vio = NULL;
03264     c->vc->do_io_close(EHTTP_ERROR);
03265 
03266     HTTP_INCREMENT_TRANS_STAT(http_cache_write_errors);
03267     DebugSM("http", "[%" PRId64 "] aborting cache write due %s event from cache", sm_id, HttpDebugNames::get_event_name(event));
03268     // abort the producer if the cache_writevc is the only consumer.
03269     if (c->producer->alive && c->producer->num_consumers == 1)
03270       tunnel.chain_abort_all(c->producer);
03271     break;
03272   case VC_EVENT_WRITE_COMPLETE:
03273     // if we've never initiated a cache write
03274     //   abort the cache since it's finicky about a close
03275     //   in this case.  This case can only occur if
03276     //   we got a truncated header from the origin server
03277     //   but decided to accept it anyway
03278     if (c->write_vio == NULL) {
03279       *status_ptr = HttpTransact::CACHE_WRITE_ERROR;
03280       c->write_success = false;
03281       c->vc->do_io_close(EHTTP_ERROR);
03282     } else {
03283       *status_ptr = HttpTransact::CACHE_WRITE_COMPLETE;
03284       c->write_success = true;
03285       c->write_vio = c->vc->do_io(VIO::CLOSE);
03286     }
03287     break;
03288   default:
03289     // All other events indicate problems
03290     ink_assert(0);
03291     break;
03292   }
03293 
03294   HTTP_DECREMENT_DYN_STAT(http_current_cache_connections_stat);
03295   return 0;
03296 }
03297 
03298 int
03299 HttpSM::tunnel_handler_post_ua(int event, HttpTunnelProducer * p)
03300 {
03301   STATE_ENTER(&HttpSM::tunnel_handler_post_ua, event);
03302   client_request_body_bytes = p->init_bytes_done + p->bytes_read;
03303 
03304   switch (event) {
03305   case VC_EVENT_EOS:
03306     // My reading of the spec says that user agents cannot terminate
03307     //  POSTs with a half close, so this is an error
03308   case VC_EVENT_ERROR:
03309   case VC_EVENT_INACTIVITY_TIMEOUT:
03310   case VC_EVENT_ACTIVE_TIMEOUT:
03311     //  Did not complete post tunneling.  Abort the
03312     //   server and close the ua
03313     p->handler_state = HTTP_SM_POST_UA_FAIL;
03314     tunnel.chain_abort_all(p);
03315     p->read_vio = NULL;
03316     p->vc->do_io_close(EHTTP_ERROR);
03317     set_ua_abort(HttpTransact::ABORTED, event);
03318 
03319     // the in_tunnel status on both the ua and
03320     //   its consumer must already be set to true.  Previously
03321     //   we were setting it again to true, but incorrectly in
03322     //   the case of a transform
03323     hsm_release_assert(ua_entry->in_tunnel == true);
03324     if (p->consumer_list.head->vc_type == HT_TRANSFORM) {
03325       hsm_release_assert(post_transform_info.entry->in_tunnel == true);
03326     } else {
03327       hsm_release_assert(server_entry->in_tunnel == true);
03328     }
03329     break;
03330 
03331   case VC_EVENT_READ_COMPLETE:
03332   case HTTP_TUNNEL_EVENT_PRECOMPLETE:
03333     // We have completed reading POST data from the client here.
03334     // It's time to free the MIOBuffer of the 100 Continue response now,
03335     // although this is a little late.
03336     if (t_state.http_config_param->send_100_continue_response &&
03337        ua_entry->write_buffer) {
03338       free_MIOBuffer(ua_entry->write_buffer);
03339       ua_entry->write_buffer = NULL;
03340     }
03341 
03342     // Completed successfully
03343     if (t_state.txn_conf->keep_alive_post_out == 0) {
03344       // don't share the session if keep-alive for post is not on
03345       DebugSM("http_ss", "Setting server session to private because of keep-alive post out");
03346       set_server_session_private(true);
03347     }
03348 
03349     p->handler_state = HTTP_SM_POST_SUCCESS;
03350     p->read_success = true;
03351     ua_entry->in_tunnel = false;
03352 
03353     if (p->do_dechunking || p->do_chunked_passthru) {
03354       if (p->chunked_handler.truncation) {
03355         tunnel.abort_cache_write_finish_others(p);
03356       } else {
03357         tunnel.local_finish_all(p);
03358       }
03359     }
03360     // Initiate another read to catch aborts and
03361     //   timeouts
03362     ua_entry->vc_handler = &HttpSM::state_watch_for_client_abort;
03363     ua_entry->read_vio = p->vc->do_io_read(this, INT64_MAX, ua_buffer_reader->mbuf);
03364     break;
03365   default:
03366     ink_release_assert(0);
03367   }
03368 
03369   return 0;
03370 }
03371 
03372 //YTS Team, yamsat Plugin
03373 //Tunnel handler to deallocate the tunnel buffers and
03374 //set redirect_in_process=false
03375 //Copy partial POST data to buffers. Check for the various parameters including
03376 //the maximum configured post data size
03377 int
03378 HttpSM::tunnel_handler_for_partial_post(int event, void * /* data ATS_UNUSED */)
03379 {
03380   STATE_ENTER(&HttpSM::tunnel_handler_for_partial_post, event);
03381   tunnel.deallocate_buffers();
03382   tunnel.reset();
03383 
03384   tunnel.allocate_redirect_postdata_producer_buffer();
03385 
03386   t_state.redirect_info.redirect_in_process = false;
03387 
03388   if (post_failed) {
03389     post_failed = false;
03390     handle_post_failure();
03391   } else
03392     do_setup_post_tunnel(HTTP_SERVER_VC);
03393 
03394   return 0;
03395 }
03396 
03397 int
03398 HttpSM::tunnel_handler_post_server(int event, HttpTunnelConsumer * c)
03399 {
03400   STATE_ENTER(&HttpSM::tunnel_handler_post_server, event);
03401 
03402   server_request_body_bytes = c->bytes_written;
03403 
03404   switch (event) {
03405   case VC_EVENT_EOS:
03406   case VC_EVENT_ERROR:
03407   case VC_EVENT_INACTIVITY_TIMEOUT:
03408   case VC_EVENT_ACTIVE_TIMEOUT:
03409     //  Did not complete post tunneling
03410     //
03411     //    In the http case, we don't want to close
03412     //    the connection because that would destroy
03413     //    the header buffer, which may contain
03414     //    a response even though the tunnel failed.
03415 
03416     // Shutdown both sides of the connection.  This prevents us
03417     //  from getting any further events and signals to client
03418     //  that POST data will not be forwarded to the server.  Doing
03419     //  shutdown on the write side will likely generate a TCP
03420     //  reset to the client but if the proxy wasn't here this is
03421     //  exactly what would happen.
03422     // We should wait to shut down the read side of the
03423     // client to prevent sending a reset.
03424     server_entry->eos = true;
03425     c->vc->do_io_shutdown(IO_SHUTDOWN_WRITE);
03426 
03427     // We may be reading from a transform.  In that case, we
03428     //   want to close the transform
03429     HttpTunnelProducer *ua_producer;
03430     if (c->producer->vc_type == HT_TRANSFORM) {
03431       if (c->producer->handler_state == HTTP_SM_TRANSFORM_OPEN) {
03432         ink_assert(c->producer->vc == post_transform_info.vc);
03433         c->producer->vc->do_io_close();
03434         c->producer->alive = false;
03435         c->producer->self_consumer->alive = false;
03436       }
03437       ua_producer = c->producer->self_consumer->producer;
03438     } else {
03439       ua_producer = c->producer;
03440     }
03441     ink_assert(ua_producer->vc_type == HT_HTTP_CLIENT);
03442     ink_assert(ua_producer->vc == ua_session);
03443     ink_assert(ua_producer->vc == ua_entry->vc);
03444 
03445     // Before shutting down, initiate another read
03446     //  on the user agent in order to get timeouts
03447     //  coming to the state machine and not the tunnel
03448     ua_entry->vc_handler = &HttpSM::state_watch_for_client_abort;
03449 
03450     //YTS Team, yamsat Plugin
03451     //When the event is VC_EVENT_ERROR, and when redirection is enabled,
03452     //do not shut down the client read
03453     if (enable_redirection) {
03454       if (ua_producer->vc_type == HT_STATIC && event != VC_EVENT_ERROR && event != VC_EVENT_EOS) {
03455         ua_entry->read_vio = ua_producer->vc->do_io_read(this, INT64_MAX, c->producer->read_buffer);
03456         //ua_producer->vc->do_io_shutdown(IO_SHUTDOWN_READ);
03457         t_state.client_info.pipeline_possible = false;
03458       } else {
03459         if (ua_producer->vc_type == HT_STATIC && t_state.redirect_info.redirect_in_process) {
03460           post_failed = true;
03461         }
03462       }
03463     } else {
03464       ua_entry->read_vio = ua_producer->vc->do_io_read(this, INT64_MAX, c->producer->read_buffer);
03465       // we should not shutdown read side of the client here to prevent sending a reset
03466       //ua_producer->vc->do_io_shutdown(IO_SHUTDOWN_READ);
03467       t_state.client_info.pipeline_possible = false;
03468     }                           //end of added logic
03469 
03470     // We want to shut down the tunnel here and see if there
03471     //   is a response from the server.  Mark the user
03472     //   agent as down so that the tunnel concludes.
03473     ua_producer->alive = false;
03474     ua_producer->handler_state = HTTP_SM_POST_SERVER_FAIL;
03475     ink_assert(tunnel.is_tunnel_alive() == false);
03476     break;
03477 
03478   case VC_EVENT_WRITE_COMPLETE:
03479     // Completed successfully
03480     c->write_success = true;
03481     break;
03482   default:
03483     ink_release_assert(0);
03484   }
03485 
03486   return 0;
03487 }
03488 
03489 int
03490 HttpSM::tunnel_handler_ssl_producer(int event, HttpTunnelProducer * p)
03491 {
03492 
03493   STATE_ENTER(&HttpSM::tunnel_handler_ssl_producer, event);
03494 
03495   switch (event) {
03496   case VC_EVENT_EOS:
03497     // The write side of this connection is still alive
03498     //  so half-close the read
03499     if (p->self_consumer->alive) {
03500       p->vc->do_io_shutdown(IO_SHUTDOWN_READ);
03501       tunnel.local_finish_all(p);
03502       break;
03503     }
03504     // FALL THROUGH - both sides of the tunnel are dead
03505   case VC_EVENT_ERROR:
03506   case VC_EVENT_INACTIVITY_TIMEOUT:
03507   case VC_EVENT_ACTIVE_TIMEOUT:
03508     // The other side of the connection is either already dead
03509     //   or rendered inoperative by the error on the connection.
03510     //   Note: use the tunnel close vc so the tunnel knows we are
03511     //    nuking this side of the connection as well
03512     tunnel.close_vc(p);
03513     tunnel.local_finish_all(p);
03514 
03515     // Because we've closed the net vc this error came in on, its write
03516     //  direction is now dead as well.  If that side is still being fed data,
03517     //  we need to kill that pipe as well
03518     if (p->self_consumer->producer->alive) {
03519       p->self_consumer->producer->alive = false;
03520       if (p->self_consumer->producer->self_consumer->alive) {
03521         p->self_consumer->producer->vc->do_io_shutdown(IO_SHUTDOWN_READ);
03522       } else {
03523         tunnel.close_vc(p->self_consumer->producer);
03524       }
03525     }
03526     break;
03527   case VC_EVENT_READ_COMPLETE:
03528   case HTTP_TUNNEL_EVENT_PRECOMPLETE:
03529     // We should never get these events since we don't know
03530     //  how long the stream is
03531   default:
03532     ink_release_assert(0);
03533   }
03534 
03535   // Update stats
03536   switch (p->vc_type) {
03537   case HT_HTTP_SERVER:
03538     server_response_body_bytes += p->bytes_read;
03539     break;
03540   case HT_HTTP_CLIENT:
03541     client_request_body_bytes += p->bytes_read;
03542     break;
03543   default:
03544     // Covered here:
03545     // HT_CACHE_READ, HT_CACHE_WRITE,
03546     // HT_TRANSFORM, HT_STATIC.
03547     break;
03548   }
03549 
03550   return 0;
03551 }
03552 
03553 int
03554 HttpSM::tunnel_handler_ssl_consumer(int event, HttpTunnelConsumer * c)
03555 {
03556   STATE_ENTER(&HttpSM::tunnel_handler_ssl_consumer, event);
03557 
03558   switch (event) {
03559   case VC_EVENT_ERROR:
03560   case VC_EVENT_EOS:
03561   case VC_EVENT_INACTIVITY_TIMEOUT:
03562   case VC_EVENT_ACTIVE_TIMEOUT:
03563     // we need to mark the producer dead
03564     // otherwise it can stay alive forever.
03565     if (c->producer->alive) {
03566       c->producer->alive = false;
03567       if (c->producer->self_consumer->alive) {
03568         c->producer->vc->do_io_shutdown(IO_SHUTDOWN_READ);
03569       } else {
03570         tunnel.close_vc(c->producer);
03571       }
03572     }
03573     // Since we are changing the state of the self_producer
03574     //  we must have the tunnel shutdown the vc
03575     tunnel.close_vc(c);
03576     tunnel.local_finish_all(c->self_producer);
03577     break;
03578 
03579   case VC_EVENT_WRITE_COMPLETE:
03580     // If we get this event, it means that the producer
03581     //  has finished and we wrote the remaining data
03582     //  to the consumer
03583     //
03584     // If the read side of this connection has not yet
03585     //  closed, do a write half-close and then wait for
03586     //  read side to close so that we don't cut off
03587     //  pipelined responses with TCP resets
03588     //
03589     ink_assert(c->producer->alive == false);
03590     c->write_success = true;
03591     if (c->self_producer->alive == true) {
03592       c->vc->do_io_shutdown(IO_SHUTDOWN_WRITE);
03593       if (!c->producer->alive) {
03594         tunnel.close_vc(c);
03595         tunnel.local_finish_all(c->self_producer);
03596         break;
03597       }
03598     } else {
03599       c->vc->do_io_close();
03600     }
03601     break;
03602 
03603   default:
03604     ink_release_assert(0);
03605   }
03606 
03607   // Update stats
03608   switch (c->vc_type) {
03609   case HT_HTTP_SERVER:
03610     server_request_body_bytes += c->bytes_written;
03611     break;
03612   case HT_HTTP_CLIENT:
03613     client_response_body_bytes += c->bytes_written;
03614     break;
03615   default:
03616     // Handled here:
03617     // HT_CACHE_READ, HT_CACHE_WRITE, HT_TRANSFORM,
03618     // HT_STATIC
03619     break;
03620   }
03621 
03622   return 0;
03623 }
03624 
03625 int
03626 HttpSM::tunnel_handler_transform_write(int event, HttpTunnelConsumer * c)
03627 {
03628   STATE_ENTER(&HttpSM::tunnel_handler_transform_write, event);
03629 
03630   HttpTransformInfo *i;
03631 
03632   // Figure out if this is the request or response transform
03633   // : use post_transform_info.entry because post_transform_info.vc
03634   // is not set to NULL after the post transform is done.
03635   if (post_transform_info.entry) {
03636     i = &post_transform_info;
03637     ink_assert(c->vc == i->entry->vc);
03638   } else {
03639     i = &transform_info;
03640     ink_assert(c->vc == i->vc);
03641     ink_assert(c->vc == i->entry->vc);
03642   }
03643 
03644   switch (event) {
03645   case VC_EVENT_ERROR:
03646     // Transform error
03647     tunnel.chain_abort_all(c->producer);
03648     c->handler_state = HTTP_SM_TRANSFORM_FAIL;
03649     c->vc->do_io_close(EHTTP_ERROR);
03650     break;
03651   case VC_EVENT_EOS:
03652     //   It's possible the transform quit
03653     //   before the producer finished.  If this is true
03654     //   we need to shut down the producer if it doesn't
03655     //   have other consumers to serve or else it will
03656     //   fill up its buffer and hang
03657     if (c->producer->alive && c->producer->num_consumers == 1) {
03658       // Send a tunnel detach event to the producer
03659       //   to shut it down but indicate it should not abort
03660       //   downstream (on the other side of the transform)
03661       //   cache writes
03662       tunnel.producer_handler(HTTP_TUNNEL_EVENT_CONSUMER_DETACH, c->producer);
03663     }
03664     // FALLTHROUGH
03665   case VC_EVENT_WRITE_COMPLETE:
03666     // write to transform complete - shutdown the write side
03667     c->write_success = true;
03668     c->vc->do_io_shutdown(IO_SHUTDOWN_WRITE);
03669 
03670     // If the read side has not started up yet, then
03671     //  this transform_vc is no longer owned by the tunnel
03672     if (c->self_producer == NULL) {
03673       i->entry->in_tunnel = false;
03674     } else if (c->self_producer->alive == false) {
03675       // The read side of the Transform
03676       //   has already completed (possible when the
03677       //   transform intentionally truncates the response).
03678       //   So close it
03679       c->vc->do_io(VIO::CLOSE);
03680     }
03681     break;
03682   default:
03683     ink_release_assert(0);
03684   }
03685 
03686   // attribute the size written to the transform from various sources
03687   // NOTE: the range transform is excluded from this accounting and
03688   // is instead handled in HttpSM::tunnel_handler_ua
03689   //
03690   // the reasoning is that the range transform is internal functionality
03691   // in support of HTTP 1.1 compliance, therefore part of "normal" operation
03692   // all other transforms are plugin driven and the difference between
03693   // source data and final data should represent the transformation delta
03694   //
03695   if (t_state.range_setup == HttpTransact::RANGE_NONE) {
03696     switch (t_state.pre_transform_source) {
03697     case HttpTransact::SOURCE_HTTP_ORIGIN_SERVER:
03698       server_response_body_bytes = client_response_body_bytes;
03699       break;
03700     case HttpTransact::SOURCE_CACHE:
03701       cache_response_body_bytes = client_response_body_bytes;
03702       break;
03703     default:
03704       break;
03705     }
03706   }
03707 
03708   return 0;
03709 }
03710 
03711 int
03712 HttpSM::tunnel_handler_transform_read(int event, HttpTunnelProducer * p)
03713 {
03714   STATE_ENTER(&HttpSM::tunnel_handler_transform_read, event);
03715 
03716   ink_assert(p->vc == transform_info.vc || p->vc == post_transform_info.vc);
03717 
03718   switch (event) {
03719   case VC_EVENT_ERROR:
03720     // Transform error
03721     tunnel.chain_abort_all(p->self_consumer->producer);
03722     break;
03723   case VC_EVENT_EOS:
03724     // If we did not get enough data from the transform, abort the
03725     //    cache write; otherwise fall through to the transform
03726     //    completing successfully
03727     if (t_state.hdr_info.transform_response_cl != HTTP_UNDEFINED_CL &&
03728         p->read_vio->nbytes < t_state.hdr_info.transform_response_cl) {
03729       tunnel.abort_cache_write_finish_others(p);
03730       break;
03731     }
03732     // FALL-THROUGH
03733   case VC_EVENT_READ_COMPLETE:
03734   case HTTP_TUNNEL_EVENT_PRECOMPLETE:
03735     // Transform complete
03736     p->read_success = true;
03737     tunnel.local_finish_all(p);
03738     break;
03739   default:
03740     ink_release_assert(0);
03741   }
03742 
03743   // it's possible that the write side of the
03744   //  transform hasn't detached yet.  If it is still alive,
03745   //  don't close the transform vc
03746   if (p->self_consumer->alive == false) {
03747     p->vc->do_io_close();
03748   }
03749   p->handler_state = HTTP_SM_TRANSFORM_CLOSED;
03750 
03751   return 0;
03752 }
03753 
03754 int
03755 HttpSM::tunnel_handler_plugin_agent(int event, HttpTunnelConsumer * c)
03756 {
03757   STATE_ENTER(&HttpSM::tunnel_handler_plugin_agent, event);
03758 
03759   switch (event) {
03760   case VC_EVENT_ERROR:
03761     c->vc->do_io_close(EHTTP_ERROR); // close up
03762     // Signal producer if we're the last consumer.
03763     if (c->producer->alive && c->producer->num_consumers == 1) {
03764       tunnel.producer_handler(HTTP_TUNNEL_EVENT_CONSUMER_DETACH, c->producer);
03765     }
03766     break;
03767   case VC_EVENT_EOS:
03768     if (c->producer->alive && c->producer->num_consumers == 1) {
03769       tunnel.producer_handler(HTTP_TUNNEL_EVENT_CONSUMER_DETACH, c->producer);
03770     }
03771     // FALLTHROUGH
03772   case VC_EVENT_WRITE_COMPLETE:
03773     c->write_success = true;
03774     c->vc->do_io(VIO::CLOSE);
03775     break;
03776   default:
03777     ink_release_assert(0);
03778   }
03779 
03780   return 0;
03781 }
03782 
03783 int
03784 HttpSM::state_srv_lookup(int event, void *data)
03785 {
03786   STATE_ENTER(&HttpSM::state_srv_lookup, event);
03787 
03788   ink_assert(t_state.req_flavor == HttpTransact::REQ_FLAVOR_SCHEDULED_UPDATE ||
03789              t_state.req_flavor == HttpTransact::REQ_FLAVOR_REVPROXY || ua_entry->vc != NULL);
03790 
03791   switch (event) {
03792   case EVENT_SRV_LOOKUP:
03793     pending_action = NULL;
03794     process_srv_info((HostDBInfo *) data);
03795     break;
03796   case EVENT_SRV_IP_REMOVED:
03797     ink_assert(!"Unexpected SRV event from HostDB. What up, Eric?");
03798     break;
03799   default:
03800     ink_assert(!"Unexpected event");
03801   }
03802 
03803   return 0;
03804 }
03805 
03806 int
03807 HttpSM::state_remap_request(int event, void * /* data ATS_UNUSED */)
03808 {
03809   STATE_ENTER(&HttpSM::state_remap_request, event);
03810 
03811   switch (event) {
03812   case EVENT_REMAP_ERROR:
03813     {
03814       ink_assert(!"this doesn't happen");
03815       pending_action = NULL;
03816       Error("error remapping request [see previous errors]");
03817       call_transact_and_set_next_state(HttpTransact::HandleRequest);    //HandleRequest skips EndRemapRequest
03818       break;
03819     }
03820 
03821   case EVENT_REMAP_COMPLETE:
03822     {
03823       pending_action = NULL;
03824       DebugSM("url_rewrite", "completed processor-based remapping request for [%" PRId64 "]", sm_id);
03825       t_state.url_remap_success = remapProcessor.finish_remap(&t_state);
03826       call_transact_and_set_next_state(NULL);
03827       break;
03828     }
03829 
03830   default:
03831     ink_assert(!"Unexpected event inside state_remap_request");
03832     break;
03833   }
03834 
03835   return 0;
03836 }
03837 
03838 void
03839 HttpSM::do_remap_request(bool run_inline)
03840 {
03841   DebugSM("http_seq", "[HttpSM::do_remap_request] Remapping request");
03842   DebugSM("url_rewrite", "Starting a possible remapping for request [%" PRId64 "]", sm_id);
03843 
03844   bool ret = false;
03845   if (t_state.cop_test_page == false) {
03846     ret = remapProcessor.setup_for_remap(&t_state);
03847   }
03848 
03849   // Preserve pristine url before remap
03850   // This needs to be done after the Host: header for reverse proxy is added to the url, but
03851   // before we return from this function for forward proxy
03852   t_state.pristine_url.create(t_state.hdr_info.client_request.url_get()->m_heap);
03853   t_state.pristine_url.copy(t_state.hdr_info.client_request.url_get());
03854 
03855   if (!ret) {
03856     DebugSM("url_rewrite", "Could not find a valid remapping entry for this request [%" PRId64 "]", sm_id);
03857     if (!run_inline) {
03858       handleEvent(EVENT_REMAP_COMPLETE, NULL);
03859     }
03860     return;
03861   }
03862 
03863   DebugSM("url_rewrite", "Found a remap map entry for [%" PRId64 "], attempting to remap request and call any plugins", sm_id);
03864   Action *remap_action_handle = remapProcessor.perform_remap(this, &t_state);
03865 
03866   if (remap_action_handle != ACTION_RESULT_DONE) {
03867     DebugSM("url_rewrite", "Still more remapping needed for [%" PRId64 "]", sm_id);
03868     ink_assert(!pending_action);
03869     historical_action = pending_action = remap_action_handle;
03870   }
03871 
03872   return;
03873 }
03874 
03875 void
03876 HttpSM::do_hostdb_lookup()
03877 {
03878 /*
03879     //////////////////////////////////////////
03880     // if a connection to the origin server //
03881     // is currently opened --- close it.    //
03882     //////////////////////////////////////////
03883     if (m_origin_server_vc != 0) {
03884    origin_server_close(CLOSE_CONNECTION);
03885    if (m_response_body_tunnel_buffer_.buf() != 0)
03886        m_response_body_tunnel_buffer_.reset();
03887     }
03888     */
03889 
03890   ink_assert(t_state.dns_info.lookup_name != NULL);
03891   ink_assert(pending_action == NULL);
03892 
03893   milestones.dns_lookup_begin = ink_get_hrtime();
03894   bool use_srv_records = t_state.srv_lookup;
03895 
03896   if (use_srv_records) {
03897     char d[MAXDNAME];
03898 
03899     memcpy(d, "_http._tcp.", 11); // don't copy '\0'
03900     ink_strlcpy(d + 11, t_state.server_info.name, sizeof(d) - 11 ); // all in the name of performance!
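    // For example (hypothetical origin): for an origin named "www.example.com",
    // d becomes "_http._tcp.www.example.com", the conventional SRV owner name
    // for HTTP over TCP.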
03901 
03902     DebugSM("dns_srv", "Beginning lookup of SRV records for origin %s", d);
03903 
03904     HostDBProcessor::Options opt;
03905     if (t_state.api_txn_dns_timeout_value != -1)
03906       opt.timeout = t_state.api_txn_dns_timeout_value;
03907     Action *srv_lookup_action_handle =
03908       hostDBProcessor.getSRVbyname_imm(this, (process_srv_info_pfn) & HttpSM::process_srv_info, d, 0, opt);
03909 
03910     if (srv_lookup_action_handle != ACTION_RESULT_DONE) {
03911       ink_assert(!pending_action);
03912       pending_action = srv_lookup_action_handle;
03913       historical_action = pending_action;
03914     } else {
03915       char *host_name = t_state.dns_info.srv_lookup_success ? t_state.dns_info.srv_hostname : t_state.dns_info.lookup_name;
03916       opt.port = t_state.dns_info.srv_lookup_success ? t_state.dns_info.srv_port : t_state.server_info.port;
03917       opt.flags = (t_state.cache_info.directives.does_client_permit_dns_storing)
03918             ? HostDBProcessor::HOSTDB_DO_NOT_FORCE_DNS
03919             : HostDBProcessor::HOSTDB_FORCE_DNS_RELOAD
03920           ;
03921       opt.timeout = (t_state.api_txn_dns_timeout_value != -1) ? t_state.api_txn_dns_timeout_value : 0;
03922       opt.host_res_style = ua_session->host_res_style;
03923 
03924       Action *dns_lookup_action_handle = hostDBProcessor.getbyname_imm(this,
03925                                                                  (process_hostdb_info_pfn) & HttpSM::
03926                                                                  process_hostdb_info,
03927                                                                  host_name, 0,
03928                                                                  opt);
03929       if (dns_lookup_action_handle != ACTION_RESULT_DONE) {
03930         ink_assert(!pending_action);
03931         pending_action = dns_lookup_action_handle;
03932         historical_action = pending_action;
03933       } else {
03934         call_transact_and_set_next_state(NULL);
03935       }
03936     }
03937     return;
03938   } else {                      /* we aren't using SRV stuff... */
03939     DebugSM("http_seq", "[HttpSM::do_hostdb_lookup] Doing DNS Lookup");
03940 
03941     // If there is not a current server, we must be looking up the origin
03942     //  server at the beginning of the transaction
03943     int server_port = t_state.current.server ? t_state.current.server->port : t_state.server_info.port;
03944 
03945     if (t_state.api_txn_dns_timeout_value != -1) {
03946       DebugSM("http_timeout", "beginning DNS lookup. allowing %d milliseconds for DNS lookup",
03947             t_state.api_txn_dns_timeout_value);
03948     }
03949 
03950     HostDBProcessor::Options opt;
03951     opt.port = server_port;
03952     opt.flags = (t_state.cache_info.directives.does_client_permit_dns_storing)
03953       ? HostDBProcessor::HOSTDB_DO_NOT_FORCE_DNS
03954       : HostDBProcessor::HOSTDB_FORCE_DNS_RELOAD
03955     ;
03956     opt.timeout = (t_state.api_txn_dns_timeout_value != -1) ? t_state.api_txn_dns_timeout_value : 0;
03957     opt.host_res_style = ua_session->host_res_style;
03958 
03959     Action *dns_lookup_action_handle = hostDBProcessor.getbyname_imm(this, (process_hostdb_info_pfn) & HttpSM::process_hostdb_info, t_state.dns_info.lookup_name, 0, opt);
03960 
03961     if (dns_lookup_action_handle != ACTION_RESULT_DONE) {
03962       ink_assert(!pending_action);
03963       pending_action = dns_lookup_action_handle;
03964       historical_action = pending_action;
03965     } else {
03966       call_transact_and_set_next_state(NULL);
03967     }
03968     return;
03969   }
03970   ink_assert(!"not reached");
03971   return;
03972 }
03973 
03974 void
03975 HttpSM::do_hostdb_reverse_lookup()
03976 {
03977   ink_assert(t_state.dns_info.lookup_name != NULL);
03978   ink_assert(pending_action == NULL);
03979 
03980   DebugSM("http_seq", "[HttpSM::do_hostdb_reverse_lookup] Doing reverse DNS Lookup");
03981 
03982   IpEndpoint addr;
03983   ats_ip_pton(t_state.dns_info.lookup_name, &addr.sa);
03984   Action *dns_lookup_action_handle = hostDBProcessor.getbyaddr_re(this, &addr.sa);
03985 
03986   if (dns_lookup_action_handle != ACTION_RESULT_DONE) {
03987     ink_assert(!pending_action);
03988     pending_action = dns_lookup_action_handle;
03989     historical_action = pending_action;
03990   }
03991   return;
03992 }
03993 
03994 void
03995 HttpSM::do_hostdb_update_if_necessary()
03996 {
03997   int issue_update = 0;
03998 
03999   if (t_state.current.server == NULL || plugin_tunnel_type != HTTP_NO_PLUGIN_TUNNEL) {
04000     // No server, so update is not necessary
04001     return;
04002   }
04003   // If we failed back over to the origin server, we don't have our
04004   //   hostdb information anymore which means we shouldn't update the hostdb
04005   if (!ats_ip_addr_eq(&t_state.current.server->addr.sa, t_state.host_db_info.ip())) {
04006     DebugSM("http", "[%" PRId64 "] skipping hostdb update due to server failover", sm_id);
04007     return;
04008   }
04009 
04010   if (t_state.updated_server_version != HostDBApplicationInfo::HTTP_VERSION_UNDEFINED) {
04011     // we may have incorrectly assumed that the hostdb had the wrong version of
04012     // http for the server because our first few connect attempts to the server
04013     // failed, causing us to downgrade our requests to a lower version and changing
04014     // our information about the server version.
04015     //
04016     // This test therefore just issues the update only if the hostdb version is
04017     // in fact different from the version we want the value to be updated to.
04018     if (t_state.host_db_info.app.http_data.http_version != t_state.updated_server_version) {
04019       t_state.host_db_info.app.http_data.http_version = t_state.updated_server_version;
04020       issue_update |= 1;
04021     }
04022 
04023     t_state.updated_server_version = HostDBApplicationInfo::HTTP_VERSION_UNDEFINED;
04024   }
04025   // Check to see if we need to report or clear a connection failure
04026   if (t_state.current.server->had_connect_fail()) {
04027     issue_update |= 1;
04028     mark_host_failure(&t_state.host_db_info, t_state.client_request_time);
04029   } else {
04030     if (t_state.host_db_info.app.http_data.last_failure != 0) {
04031       t_state.host_db_info.app.http_data.last_failure = 0;
04032       ats_ip_port_cast(&t_state.current.server->addr) = htons(t_state.current.server->port);
04033       issue_update |= 1;
04034       char addrbuf[INET6_ADDRPORTSTRLEN];
04035       DebugSM("http", "[%" PRId64 "] hostdb update marking IP: %s as up",
04036             sm_id,
04037             ats_ip_nptop(&t_state.current.server->addr.sa, addrbuf, sizeof(addrbuf)));
04038     }
04039 
04040     if (t_state.dns_info.srv_lookup_success && t_state.dns_info.srv_app.http_data.last_failure != 0) {
04041       t_state.dns_info.srv_app.http_data.last_failure = 0;
04042       hostDBProcessor.setby_srv(t_state.dns_info.lookup_name, 0, t_state.dns_info.srv_hostname, &t_state.dns_info.srv_app);
04043       DebugSM("http", "[%" PRId64 "] hostdb update marking SRV: %s as up",
04044                   sm_id,
04045                   t_state.dns_info.srv_hostname);
04046     }
04047   }
04048 
04049   if (issue_update) {
04050     hostDBProcessor.setby(t_state.current.server->name,
04051       strlen(t_state.current.server->name),
04052       &t_state.current.server->addr.sa,
04053       &t_state.host_db_info.app
04054     );
04055   }
04056 
04057   char addrbuf[INET6_ADDRPORTSTRLEN];
04058   DebugSM("http", "server info = %s", ats_ip_nptop(&t_state.current.server->addr.sa, addrbuf, sizeof(addrbuf)));
04059   return;
04060 }
04061 
04062 /*
04063  * a range entry [a,b] is valid iff (a >= 0 and b >= 0 and a <= b)
04064  * HttpTransact::RANGE_NONE if the content length of the cached copy is zero or
04065  * there is no range entry
04066  * HttpTransact::RANGE_NOT_SATISFIABLE iff all range entries are valid but
04067  * none overlap the current extent of the cached copy
04068  * HttpTransact::RANGE_NOT_HANDLED if there are out-of-order Range entries or
04069  * the cached copy's content_length is INT64_MAX (e.g. read_from_writer and trunked)
04070  * HttpTransact::RANGE_REQUESTED if all sub-range entries are valid and
04071  * in order (remove the entries that do not overlap the extent of the cached copy)
04072  */
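/*
 * Worked example (illustrative, assuming a 1000-byte cached object):
 *   "Range: bytes=0-99,500-599,900-"  -> ranges [0,99], [500,599], [900,999];
 *                                        RANGE_REQUESTED with num_range_fields = 3
 *   "Range: bytes=-100"               -> suffix range; start = 900, end = 999
 *   "Range: bytes=1200-"              -> start beyond the object; if no other entry
 *                                        is satisfiable: RANGE_NOT_SATISFIABLE
 *   "Range: bytes=500-599,0-99"       -> out of order: RANGE_NOT_HANDLED
 *   "Range: bytes=500-100"            -> start > end: RANGE_NONE
 */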
04073 void
04074 HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
04075 {
04076   int prev_good_range = -1;
04077   const char *value;
04078   int value_len;
04079   int n_values;
04080   int nr = 0; // number of valid ranges, also index to range array.
04081   int not_satisfy = 0;
04082   HdrCsvIter csv;
04083   const char *s, *e, *tmp;
04084   RangeRecord *ranges = NULL;
04085   int64_t start, end;
04086 
04087   ink_assert(field != NULL && t_state.range_setup == HttpTransact::RANGE_NONE && t_state.ranges == NULL);
04088 
04089   if (content_length <= 0)
04090     return;
04091 
04092   // ToDo: Can this really happen?
04093   if (content_length == INT64_MAX) {
04094     t_state.range_setup = HttpTransact::RANGE_NOT_HANDLED;
04095     return;
04096   }
04097 
04098   n_values = 0;
04099   value = csv.get_first(field, &value_len);
04100   while (value) {
04101     ++n_values;
04102     value = csv.get_next(&value_len);
04103   }
04104 
04105   value = csv.get_first(field, &value_len);
04106   if (n_values <= 0 || ptr_len_ncmp(value, value_len, "bytes=", 6))
04107     return;
04108 
04109   ranges = new RangeRecord[n_values];
04110   value += 6; // skip leading 'bytes='
04111   value_len -= 6;
04112 
04113   for (; value; value = csv.get_next(&value_len)) {
04114     if (!(tmp = (const char *) memchr(value, '-', value_len))) {
04115       t_state.range_setup = HttpTransact::RANGE_NONE;
04116       goto Lfaild;
04117     }
04118 
04119     // process start value
04120     s = value;
04121     e = tmp;
04122     // skip leading white spaces
04123     for (; s < e && ParseRules::is_ws(*s); ++s) ;
04124 
04125     if (s >= e)
04126       start = -1;
04127     else {
04128       for (start = 0; s < e && *s >= '0' && *s <= '9'; ++s)
04129         start = start * 10 + (*s - '0');
04130       // skip last white spaces
04131       for (; s < e && ParseRules::is_ws(*s); ++s) ;
04132 
04133       if (s < e || start < 0) {
04134         t_state.range_setup = HttpTransact::RANGE_NONE;
04135         goto Lfaild;
04136       }
04137     }
04138 
04139     // process end value
04140     s = tmp + 1;
04141     e = value + value_len;
04142     // skip leading white spaces
04143     for (; s < e && ParseRules::is_ws(*s); ++s) ;
04144 
04145     if (s >= e) {
04146       if (start < 0) {
04147         t_state.range_setup = HttpTransact::RANGE_NONE;
04148         goto Lfaild;
04149       } else if (start >= content_length) {
04150         not_satisfy++;
04151         continue;
04152       }
04153       end = content_length - 1;
04154     } else {
04155       for (end = 0; s < e && *s >= '0' && *s <= '9'; ++s)
04156         end = end * 10 + (*s - '0');
04157       // skip last white spaces
04158       for (; s < e && ParseRules::is_ws(*s); ++s) ;
04159 
04160       if (s < e || end < 0) {
04161         t_state.range_setup = HttpTransact::RANGE_NONE;
04162         goto Lfaild;
04163       }
04164 
04165       if (start < 0) {
04166         if (end >= content_length)
04167           end = content_length;
04168         start = content_length - end;
04169         end = content_length - 1;
04170       } else if (start >= content_length && start <= end) {
04171         not_satisfy++;
04172         continue;
04173       }
04174 
04175       if (end >= content_length)
04176         end = content_length - 1;
04177     }
04178 
04179     if (start > end) {
04180       t_state.range_setup = HttpTransact::RANGE_NONE;
04181       goto Lfaild;
04182     }
04183 
04184     if (prev_good_range >= 0 && start <= ranges[prev_good_range]._end) {
04185       t_state.range_setup = HttpTransact::RANGE_NOT_HANDLED;
04186       goto Lfaild;
04187     }
04188 
04189     ink_assert(start >= 0 && end >= 0 && start < content_length && end < content_length);
04190 
04191     prev_good_range = nr;
04192     ranges[nr]._start = start;
04193     ranges[nr]._end = end;
04194     ++nr;
04195   }
04196 
04197   if (nr > 0) {
04198     t_state.range_setup = HttpTransact::RANGE_REQUESTED;
04199     t_state.ranges = ranges;
04200     t_state.num_range_fields = nr;
04201     return;
04202   }
04203 
04204   if (not_satisfy)
04205     t_state.range_setup = HttpTransact::RANGE_NOT_SATISFIABLE;
04206 
04207 Lfaild:
04208   t_state.num_range_fields = -1;
04209   delete []ranges;
04210   return;
04211 }
04212 
04213 void
04214 HttpSM::calculate_output_cl(int64_t content_length, int64_t num_chars)
04215 {
04216   int i;
04217 
04218   if (t_state.range_setup != HttpTransact::RANGE_REQUESTED &&
04219       t_state.range_setup != HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED)
04220     return;
04221 
04222   ink_assert(t_state.ranges);
04223 
04224   if (t_state.num_range_fields == 1) {
04225     t_state.range_output_cl = t_state.ranges[0]._end - t_state.ranges[0]._start + 1;
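    // e.g. (hypothetical) a single range [500,599] yields a response
    // Content-Length of 599 - 500 + 1 = 100 bytes, with no multipart framing.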
04226   }
04227   else {
04228     for (i = 0; i < t_state.num_range_fields; i++) {
04229       if (t_state.ranges[i]._start >= 0) {
04230         t_state.range_output_cl += boundary_size;
04231         t_state.range_output_cl += sub_header_size + content_length;
04232         t_state.range_output_cl += num_chars_for_int(t_state.ranges[i]._start)
04233           + num_chars_for_int(t_state.ranges[i]._end) + num_chars + 2;
04234         t_state.range_output_cl += t_state.ranges[i]._end - t_state.ranges[i]._start + 1;
04235         t_state.range_output_cl += 2;
04236       }
04237     }
04238 
04239     t_state.range_output_cl += boundary_size + 2;
04240   }
04241 
04242   Debug("http_range", "Pre-calculated Content-Length for Range response is %" PRId64, t_state.range_output_cl);
04243 }
04244 
04245 void
04246 HttpSM::do_range_parse(MIMEField *range_field)
04247 {
04248 
04249   //bool res = false;
04250   
04251   int64_t content_length   = t_state.cache_info.object_read->object_size_get();
04252   int64_t num_chars_for_cl = num_chars_for_int(content_length);
04253   
04254   parse_range_and_compare(range_field, content_length);
04255   calculate_output_cl(content_length, num_chars_for_cl);
04256 }
04257 
04258 // this function looks for any Range: headers, parses them and either
04259 // sets up a transform processor to handle the request OR defers to the 
04260 // HttpTunnel
04261 void
04262 HttpSM::do_range_setup_if_necessary()
04263 {
04264   MIMEField *field;
04265   INKVConnInternal *range_trans;
04266   int field_content_type_len = -1;
04267   const char * content_type;
04268   
04269   ink_assert(t_state.cache_info.object_read != NULL);
04270   
04271   field = t_state.hdr_info.client_request.field_find(MIME_FIELD_RANGE, MIME_LEN_RANGE);
04272   ink_assert(field != NULL);
04273   
04274   t_state.range_setup = HttpTransact::RANGE_NONE;
04275 
04276   if (t_state.method == HTTP_WKSIDX_GET && t_state.hdr_info.client_request.version_get() == HTTPVersion(1, 1)) {
04277     do_range_parse(field);
04278 
04279     // if only one range entry and pread is capable, no need transform range
04280     if (t_state.range_setup == HttpTransact::RANGE_REQUESTED &&
04281         t_state.num_range_fields == 1 &&
04282         cache_sm.cache_read_vc->is_pread_capable())
04283       t_state.range_setup = HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED;
04284 
04285     if (t_state.range_setup == HttpTransact::RANGE_REQUESTED && 
04286         api_hooks.get(TS_HTTP_RESPONSE_TRANSFORM_HOOK) == NULL) {
04287       Debug("http_trans", "Unable to accelerate range request, fallback to transform");
04288       content_type = t_state.cache_info.object_read->response_get()->value_get(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE, &field_content_type_len);
04289       //create a Range: transform processor for requests of type Range: bytes=1-2,4-5,10-100 (e.g. multiple ranges)
04290       range_trans = transformProcessor.range_transform(mutex,
04291           t_state.ranges,
04292           t_state.num_range_fields,
04293           &t_state.hdr_info.transform_response,
04294           content_type,
04295           field_content_type_len,
04296           t_state.cache_info.object_read->object_size_get()
04297           );
04298       api_hooks.append(TS_HTTP_RESPONSE_TRANSFORM_HOOK, range_trans);
04299     }
04300   }
04301 }
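// Note: an informal summary of the decision above.  For a GET over HTTP/1.1
// with a cached object, a single-range request (e.g. "Range: bytes=0-1023")
// on a pread-capable cache VC is downgraded to RANGE_NOT_TRANSFORM_REQUESTED
// and served without a transform; a multi-range request such as
// "Range: bytes=1-2,4-5,10-100" (or a single range when pread is not
// available) stays RANGE_REQUESTED and a range transform is appended to
// TS_HTTP_RESPONSE_TRANSFORM_HOOK, but only if no other response transform
// hook is already registered.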
04302 
04303 
04304 void
04305 HttpSM::do_cache_lookup_and_read()
04306 {
04307   // TODO decide whether to uncomment after redirect testing is finished
04308   //ink_assert(server_session == NULL);
04309   ink_assert(pending_action == 0);
04310 
04311   HTTP_INCREMENT_TRANS_STAT(http_cache_lookups_stat);
04312 
04313   milestones.cache_open_read_begin = ink_get_hrtime();
04314   t_state.cache_lookup_result = HttpTransact::CACHE_LOOKUP_NONE;
04315   t_state.cache_info.lookup_count++;
04316   // YTS Team, yamsat Plugin
04317   // Changed the lookup_url to c_url so that even
04318   // the new redirect URL performs a CACHE_LOOKUP
04319   URL *c_url;
04320   if (t_state.redirect_info.redirect_in_process)
04321     c_url = t_state.hdr_info.client_request.url_get();
04322   else
04323     c_url = t_state.cache_info.lookup_url;
04324 
04325   DebugSM("http_seq", "[HttpSM::do_cache_lookup_and_read] [%" PRId64 "] Issuing cache lookup for URL %s",  sm_id, c_url->string_get(&t_state.arena));
04326   Action *cache_action_handle = cache_sm.open_read(c_url,
04327                                                    &t_state.hdr_info.client_request,
04328                                                    &(t_state.cache_info.config),
04329                                                    (time_t) ((t_state.cache_control.pin_in_cache_for < 0) ?
04330                                                              0 : t_state.cache_control.pin_in_cache_for));
04331   //
04332   // pin_in_cache value is an open_write parameter.
04333   // It is passed in open_read to allow the cluster to
04334   // optimize the typical open_read/open_read failed/open_write
04335   // sequence.
04336   //
04337   if (cache_action_handle != ACTION_RESULT_DONE) {
04338     ink_assert(!pending_action);
04339     pending_action = cache_action_handle;
04340     historical_action = pending_action;
04341   }
04342   REMEMBER((long) pending_action, reentrancy_count);
04343 
04344   return;
04345 }
04346 
04347 void
04348 HttpSM::do_cache_delete_all_alts(Continuation * cont)
04349 {
04350   // Do not delete a non-existent object.
04351   ink_assert(t_state.cache_info.object_read);
04352 
04353   DebugSM("http_seq", "[HttpSM::do_cache_delete_all_alts] Issuing cache delete for %s",
04354           t_state.cache_info.lookup_url->string_get_ref());
04355 
04356   Action *cache_action_handle = NULL;
04357 
04358   cache_action_handle = cacheProcessor.remove(cont, t_state.cache_info.lookup_url, t_state.cache_control.cluster_cache_local);
04359   if (cont != NULL) {
04360     if (cache_action_handle != ACTION_RESULT_DONE) {
04361       ink_assert(!pending_action);
04362       pending_action = cache_action_handle;
04363       historical_action = pending_action;
04364     }
04365   }
04366 
04367   return;
04368 }
04369 
04370 inline void
04371 HttpSM::do_cache_prepare_write()
04372 {
04373   // Statistically there is no need to retry when we are trying to lock
04374   // the LOCK_URL_SECOND url because the server's behavior is unlikely to change
04375   milestones.cache_open_write_begin = ink_get_hrtime();
04376   bool retry = (t_state.api_lock_url == HttpTransact::LOCK_URL_FIRST);
04377   do_cache_prepare_action(&cache_sm, t_state.cache_info.object_read, retry);
04378 }
04379 
04380 inline void
04381 HttpSM::do_cache_prepare_write_transform()
04382 {
04383   if (cache_sm.cache_write_vc != NULL || tunnel.has_cache_writer())
04384     do_cache_prepare_action(&transform_cache_sm, NULL, false, true);
04385   else
04386     do_cache_prepare_action(&transform_cache_sm, NULL, false);
04387 }
04388 
04389 void
04390 HttpSM::do_cache_prepare_update()
04391 {
04392   if (t_state.cache_info.object_read != NULL &&
04393       t_state.cache_info.object_read->valid() &&
04394       t_state.cache_info.object_store.valid() &&
04395       t_state.cache_info.object_store.response_get() != NULL &&
04396       t_state.cache_info.object_store.response_get()->valid() && t_state.hdr_info.client_request.method_get_wksidx()
04397       == HTTP_WKSIDX_GET) {
04398     t_state.cache_info.object_store.request_set(t_state.cache_info.object_read->request_get());
04399     // t_state.cache_info.object_read = NULL;
04400     // cache_sm.close_read();
04401 
04402     t_state.transact_return_point = HttpTransact::HandleUpdateCachedObject;
04403     ink_assert(cache_sm.cache_write_vc == NULL);
04404     HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_cache_open_write);
04405     // don't retry read for update
04406     do_cache_prepare_action(&cache_sm, t_state.cache_info.object_read, false);
04407   } else {
04408     t_state.api_modifiable_cached_resp = false;
04409     call_transact_and_set_next_state(HttpTransact::HandleApiErrorJump);
04410   }
04411 }
04412 
04413 void
04414 HttpSM::do_cache_prepare_action(HttpCacheSM * c_sm, CacheHTTPInfo * object_read_info, bool retry, bool allow_multiple)
04415 {
04416   URL *o_url, *c_url, *s_url;
04417   bool restore_client_request = false;
04418 
04419   ink_assert(!pending_action);
04420   ink_assert(c_sm->cache_write_vc == NULL);
04421 
04422   if (t_state.api_lock_url == HttpTransact::LOCK_URL_FIRST) {
04423     if (t_state.redirect_info.redirect_in_process) {
04424       o_url = &(t_state.redirect_info.original_url);
04425       ink_assert(o_url->valid());
04426       restore_client_request = true;
04427       s_url = o_url;
04428     } else {
04429       o_url = &(t_state.cache_info.original_url);
04430       if (o_url->valid())
04431         s_url = o_url;
04432       else
04433         s_url = t_state.cache_info.lookup_url;
04434     }
04435   } else if (t_state.api_lock_url == HttpTransact::LOCK_URL_SECOND) {
04436     s_url = &t_state.cache_info.lookup_url_storage;
04437   } else {
04438     ink_assert(t_state.api_lock_url == HttpTransact::LOCK_URL_ORIGINAL);
04439     s_url = &(t_state.cache_info.original_url);
04440     restore_client_request = true;
04441   }
04442 
04443   // modify client request to make it have the url we are going to
04444   // store into the cache
04445   if (restore_client_request) {
04446     c_url = t_state.hdr_info.client_request.url_get();
04447     s_url->copy(c_url);
04448   }
04449 
04450   ink_assert(s_url != NULL && s_url->valid());
04451   DebugSM("http_cache_write", "[%" PRId64 "] writing to cache with URL %s", sm_id, s_url->string_get(&t_state.arena));
04452   Action *cache_action_handle = c_sm->open_write(s_url, &t_state.hdr_info.client_request,
04453                                                  object_read_info,
04454                                                  (time_t) ((t_state.cache_control.pin_in_cache_for < 0) ?
04455                                                            0 : t_state.cache_control.pin_in_cache_for),
04456                                                  retry, allow_multiple);
04457 
04458   if (cache_action_handle != ACTION_RESULT_DONE) {
04459     ink_assert(!pending_action);
04460     pending_action = cache_action_handle;
04461     historical_action = pending_action;
04462   }
04463 }
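// Note: roughly, the URL used as the cache write key above depends on
// t_state.api_lock_url.  LOCK_URL_FIRST uses the original URL (or, during a
// redirect, redirect_info.original_url), falling back to lookup_url when no
// original is valid; LOCK_URL_SECOND uses lookup_url_storage; and
// LOCK_URL_ORIGINAL uses cache_info.original_url.  In the restore_client_request
// cases that choice is also copied back into the client request URL before
// open_write() is issued.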
04464 
04465 //////////////////////////////////////////////////////////////////////////
04466 //
04467 //  HttpSM::do_http_server_open()
04468 //
04469 //////////////////////////////////////////////////////////////////////////
04470 void
04471 HttpSM::do_http_server_open(bool raw)
04472 {
04473   int ip_family = t_state.current.server->addr.sa.sa_family;
04474   DebugSM("http_track", "entered inside do_http_server_open [%s]", ats_ip_family_name(ip_family));
04475 
04476   ink_assert(server_entry == NULL);
04477 
04478   // ua_entry can be null if a scheduled update is also a reverse proxy
04479   // request. Added REVPROXY to the assert below, and then changed checks
04480   // to be based on ua_session != NULL instead of req_flavor value.
04481   ink_assert(ua_entry != NULL ||
04482              t_state.req_flavor == HttpTransact::REQ_FLAVOR_SCHEDULED_UPDATE ||
04483              t_state.req_flavor == HttpTransact::REQ_FLAVOR_REVPROXY);
04484 
04485   ink_assert(pending_action == NULL);
04486 
04487   if (false == t_state.api_server_addr_set) {
04488     ink_assert(t_state.current.server->port > 0);
04489     t_state.current.server->addr.port() = htons(t_state.current.server->port);
04490   } else {
04491     ink_assert(ats_ip_port_cast(&t_state.current.server->addr) != 0);
04492   }
04493 
04494   char addrbuf[INET6_ADDRPORTSTRLEN];
04495   DebugSM("http", "[%" PRId64 "] open connection to %s: %s",
04496         sm_id, t_state.current.server->name, 
04497         ats_ip_nptop(&t_state.current.server->addr.sa, addrbuf, sizeof(addrbuf)));
04498 
04499   if (plugin_tunnel) {
04500     PluginVCCore *t = plugin_tunnel;
04501     plugin_tunnel = NULL;
04502     Action *pvc_action_handle = t->connect_re(this);
04503 
04504     // This connect call is always reentrant
04505     ink_release_assert(pvc_action_handle == ACTION_RESULT_DONE);
04506     return;
04507   }
04508 
04509   DebugSM("http_seq", "[HttpSM::do_http_server_open] Sending request to server");
04510 
04511   milestones.server_connect = ink_get_hrtime();
04512   if (milestones.server_first_connect == 0) {
04513     milestones.server_first_connect = milestones.server_connect;
04514   }
04515 
04516   if (t_state.pCongestionEntry != NULL) {
04517     if (t_state.pCongestionEntry->F_congested() && (!t_state.pCongestionEntry->proxy_retry(milestones.server_connect))) {
04518       t_state.congestion_congested_or_failed = 1;
04519       t_state.pCongestionEntry->stat_inc_F();
04520       CONGEST_INCREMENT_DYN_STAT(congested_on_F_stat);
04521       handleEvent(CONGESTION_EVENT_CONGESTED_ON_F, NULL);
04522       return;
04523     } else if (t_state.pCongestionEntry->M_congested(ink_hrtime_to_sec(milestones.server_connect))) {
04524       t_state.pCongestionEntry->stat_inc_M();
04525       t_state.congestion_congested_or_failed = 1;
04526       CONGEST_INCREMENT_DYN_STAT(congested_on_M_stat);
04527       handleEvent(CONGESTION_EVENT_CONGESTED_ON_M, NULL);
04528       return;
04529     }
04530   }
04531   // If this is not a raw connection, we try to get a session from the
04532   //  shared session pool.  Raw connections are for SSL tunnels and
04533   //  require a new connection
04534   //
04535 
04536   // This problem with POST requests is a bug.  Because of the
04537   // race with us sending a request after the server has closed but before the FIN
04538   // gets to us, we should open a new connection for POST.  I believe TS used
04539   // to do this, but as far as I can tell the code that prevented keep-alive when
04540   // there is a request body has been removed.
04541 
04542   if (raw == false && TS_SERVER_SESSION_SHARING_MATCH_NONE != t_state.txn_conf->server_session_sharing_match &&
04543       (t_state.txn_conf->keep_alive_post_out == 1 || t_state.hdr_info.request_content_length == 0) &&
04544        !is_private() && ua_session != NULL) {
04545     HSMresult_t shared_result;
04546     shared_result = httpSessionManager.acquire_session(this,    // state machine
04547                                                        &t_state.current.server->addr.sa,    // ip + port
04548                                                        t_state.current.server->name,    // hostname
04549                                                        ua_session,      // has ptr to bound ua sessions
04550                                                        this     // sm
04551     );
04552 
04553     switch (shared_result) {
04554     case HSM_DONE:
04555       hsm_release_assert(server_session != NULL);
04556       handle_http_server_open();
04557       return;
04558     case HSM_NOT_FOUND:
04559       hsm_release_assert(server_session == NULL);
04560       break;
04561     case HSM_RETRY:
04562       //  Could not get shared pool lock
04563       //   FIX: should retry lock
04564       break;
04565     default:
04566       hsm_release_assert(0);
04567     }
04568   }
04569   // This bug occurred when share_server_sessions was set to 0
04570   // and we had keep-alive: we were trying to open a new server session
04571   // when we already had an attached server session.
04572   else if ((TS_SERVER_SESSION_SHARING_MATCH_NONE == t_state.txn_conf->server_session_sharing_match || is_private()) &&
04573            (ua_session != NULL)) {
04574     HttpServerSession *existing_ss = ua_session->get_server_session();
04575 
04576     if (existing_ss) {
04577       // [amc] Not sure if this is the best option, but we don't get here unless session sharing is disabled
04578       // so there's no point in further checking on the match or pool values. But why check anything? The
04579       // client has already exchanged a request with this specific origin server and has sent another one,
04580       // so shouldn't we just automatically keep the association?
04581       if (ats_ip_addr_port_eq(&existing_ss->server_ip.sa, &t_state.current.server->addr.sa)) {
04582         ua_session->attach_server_session(NULL);
04583         existing_ss->state = HSS_ACTIVE;
04584         this->attach_server_session(existing_ss);
04585         hsm_release_assert(server_session != NULL);
04586         handle_http_server_open();
04587         return;
04588       } else {
04589         // As this is in the non-sharing configuration, we want to close
04590         // the existing connection and call connect_re to get a new one
04591         existing_ss->release();
04592         ua_session->attach_server_session(NULL);
04593       }
04594     }
04595   }
04596   // Otherwise, we release the existing connection and call connect_re
04597   // to get a new one.
04598   // ua_session is null when t_state.req_flavor == REQ_FLAVOR_SCHEDULED_UPDATE
04599   else if (ua_session != NULL) {
04600     HttpServerSession *existing_ss = ua_session->get_server_session();
04601     if (existing_ss) {
04602       existing_ss->release();
04603       ua_session->attach_server_session(NULL);
04604     }
04605   }
04606   // Check to see if we have reached the max number of connections.
04607   // Atomically read the current number of connections and check to see
04608   // if we have gone above the max allowed.
04609   if (t_state.http_config_param->server_max_connections > 0) {
04610     int64_t sum;
04611 
04612     HTTP_READ_GLOBAL_DYN_SUM(http_current_server_connections_stat, sum);
04613 
04614     // Note that there is a potential race condition here where
04615     // the value of the http_current_server_connections_stat gets changed
04616     // between the statement above and the check below.
04617     // If this happens, we might go over the max by 1 but this is ok.
04618     if (sum >= t_state.http_config_param->server_max_connections) {
04619       ink_assert(pending_action == NULL);
04620       pending_action = eventProcessor.schedule_in(this, HRTIME_MSECONDS(100));
04621       httpSessionManager.purge_keepalives();
04622       return;
04623     }
04624   }
04625   // Check to see if we have reached the max number of connections on this
04626   // host.
04627   if (t_state.txn_conf->origin_max_connections > 0) {
04628     ConnectionCount *connections = ConnectionCount::getInstance();
04629 
04630     char addrbuf[INET6_ADDRSTRLEN];
04631     if (connections->getCount((t_state.current.server->addr)) >= t_state.txn_conf->origin_max_connections) {
04632       DebugSM("http", "[%" PRId64 "] over the max number of connections for this host: %s", sm_id,
04633         ats_ip_ntop(&t_state.current.server->addr.sa, addrbuf, sizeof(addrbuf)));
04634       ink_assert(pending_action == NULL);
04635       pending_action = eventProcessor.schedule_in(this, HRTIME_MSECONDS(100));
04636       return;
04637     }
04638   }
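  // Note: in both of the limit checks above the SM does not fail the request;
  // it simply parks itself by scheduling a wakeup roughly 100ms out (and, for
  // the global limit, asks the session manager to purge keep-alive sessions)
  // so the connection attempt can be retried once capacity frees up.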
04639 
04640   // We did not manage to get an existing session
04641   //  and need to open a new connection
04642   Action *connect_action_handle;
04643 
04644   NetVCOptions opt;
04645   opt.f_blocking_connect = false;
04646   opt.set_sock_param(t_state.txn_conf->sock_recv_buffer_size_out,
04647                      t_state.txn_conf->sock_send_buffer_size_out,
04648                      t_state.txn_conf->sock_option_flag_out,
04649                      t_state.txn_conf->sock_packet_mark_out,
04650                      t_state.txn_conf->sock_packet_tos_out);
04651 
04652   opt.ip_family = ip_family;
04653 
04654   if (ua_session) {
04655     opt.local_port = ua_session->outbound_port;
04656 
04657     IpAddr& outbound_ip = AF_INET6 == ip_family ? ua_session->outbound_ip6 : ua_session->outbound_ip4;
04658     if (outbound_ip.isValid()) {
04659       opt.addr_binding = NetVCOptions::INTF_ADDR;
04660       opt.local_ip = outbound_ip;
04661     } else if (ua_session->f_outbound_transparent) {
04662       opt.addr_binding = NetVCOptions::FOREIGN_ADDR;
04663       opt.local_ip = t_state.client_info.addr;
04664       /* If the connection is server side transparent, we can bind to the
04665          port that the client chose instead of randomly assigning one at
04666          the proxy.  This is controlled by the 'use_client_source_port'
04667          configuration parameter.
04668       */
04669 
04670       NetVConnection *client_vc = ua_session->get_netvc();
04671       if (t_state.http_config_param->use_client_source_port && NULL != client_vc) {
04672         opt.local_port = client_vc->get_remote_port();
04673       }
04674     }
04675   }
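  // Note: a short summary of the binding logic above.  An explicitly
  // configured outbound address wins and is bound as INTF_ADDR; otherwise, for
  // an outbound-transparent client session, the origin connection is bound
  // FOREIGN_ADDR to the client's own IP, and optionally to the client's source
  // port as well when the 'use_client_source_port' parameter is enabled.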
04676 
04677   int scheme_to_use = t_state.scheme; // get initial scheme
04678 
04679   if (!t_state.is_websocket) { // if not websocket, then get scheme from server request
04680     int new_scheme_to_use = t_state.hdr_info.server_request.url_get()->scheme_get_wksidx();
04681     // if the server_request url scheme was never set, try the client_request
04682     if (new_scheme_to_use < 0) {
04683       new_scheme_to_use = t_state.hdr_info.client_request.url_get()->scheme_get_wksidx();
04684     }
04685     if (new_scheme_to_use >= 0) { // found a new scheme, use it
04686       scheme_to_use = new_scheme_to_use;
04687     }
04688   }
04689 
04690   if (scheme_to_use == URL_WKSIDX_HTTPS) {
04691     DebugSM("http", "calling sslNetProcessor.connect_re");
04692     int len = 0;
04693     const char * host = t_state.hdr_info.server_request.host_get(&len);
04694     opt.set_sni_servername(host, len);
04695     connect_action_handle = sslNetProcessor.connect_re(this,    // state machine
04696                                                        &t_state.current.server->addr.sa,    // addr + port
04697                                                        &opt);
04698   } else {
04699     if (t_state.method != HTTP_WKSIDX_CONNECT) {
04700       DebugSM("http", "calling netProcessor.connect_re");
04701       connect_action_handle = netProcessor.connect_re(this,     // state machine
04702                                                       &t_state.current.server->addr.sa,    // addr + port
04703                                                       &opt);
04704     } else {
04705       // Setup the timeouts
04706       // Set the inactivity timeout to the connect timeout so that
04707       //   we fail this server if it doesn't start sending the response
04708       //   header
04709       MgmtInt connect_timeout;
04710       if (t_state.method == HTTP_WKSIDX_POST || t_state.method == HTTP_WKSIDX_PUT) {
04711         connect_timeout = t_state.txn_conf->post_connect_attempts_timeout;
04712       } else if (t_state.current.server == &t_state.parent_info) {
04713         connect_timeout = t_state.http_config_param->parent_connect_timeout;
04714       } else {
04715         if (t_state.pCongestionEntry != NULL)
04716           connect_timeout = t_state.pCongestionEntry->connect_timeout();
04717         else
04718           connect_timeout = t_state.txn_conf->connect_attempts_timeout;
04719       }
04720       DebugSM("http", "calling netProcessor.connect_s");
04721       connect_action_handle = netProcessor.connect_s(this,      // state machine
04722                                                      &t_state.current.server->addr.sa,    // addr + port
04723                                                      connect_timeout, &opt);
04724     }
04725   }
04726 
04727   if (connect_action_handle != ACTION_RESULT_DONE) {
04728     ink_assert(!pending_action);
04729     pending_action = connect_action_handle;
04730     historical_action = pending_action;
04731   }
04732 
04733   return;
04734 }
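// Note: informally, the connect call above is chosen by scheme and method.
// HTTPS requests go through sslNetProcessor.connect_re() with the server
// request's host as the SNI name, plain requests use netProcessor.connect_re(),
// and CONNECT requests use netProcessor.connect_s() with an explicit connect
// timeout (post/put, parent, congestion, or the default connect timeout).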
04735 
04736 
04737 void
04738 HttpSM::do_icp_lookup()
04739 {
04740   ink_assert(pending_action == NULL);
04741 
04742   URL *o_url = &t_state.cache_info.original_url;
04743 
04744   Action *icp_lookup_action_handle = icpProcessor.ICPQuery(this,
04745                                                            o_url->valid()? o_url : t_state.cache_info.lookup_url);
04746 
04747   if (icp_lookup_action_handle != ACTION_RESULT_DONE) {
04748     ink_assert(!pending_action);
04749     pending_action = icp_lookup_action_handle;
04750     historical_action = pending_action;
04751   }
04752 
04753   return;
04754 }
04755 
04756 void
04757 HttpSM::do_api_callout_internal()
04758 {
04759   if (t_state.backdoor_request) {
04760     handle_api_return();
04761     return;
04762   }
04763 
04764   switch (t_state.api_next_action) {
04765   case HttpTransact::SM_ACTION_API_SM_START:
04766     cur_hook_id = TS_HTTP_TXN_START_HOOK;
04767     break;
04768   case HttpTransact::SM_ACTION_API_PRE_REMAP:
04769     cur_hook_id = TS_HTTP_PRE_REMAP_HOOK;
04770     break;
04771   case HttpTransact::SM_ACTION_API_POST_REMAP:
04772     cur_hook_id = TS_HTTP_POST_REMAP_HOOK;
04773     break;
04774   case HttpTransact::SM_ACTION_API_READ_REQUEST_HDR:
04775     cur_hook_id = TS_HTTP_READ_REQUEST_HDR_HOOK;
04776     break;
04777   case HttpTransact::SM_ACTION_API_OS_DNS:
04778     cur_hook_id = TS_HTTP_OS_DNS_HOOK;
04779     break;
04780   case HttpTransact::SM_ACTION_API_SEND_REQUEST_HDR:
04781     cur_hook_id = TS_HTTP_SEND_REQUEST_HDR_HOOK;
04782     break;
04783   case HttpTransact::SM_ACTION_API_READ_CACHE_HDR:
04784     cur_hook_id = TS_HTTP_READ_CACHE_HDR_HOOK;
04785     break;
04786   case HttpTransact::SM_ACTION_API_CACHE_LOOKUP_COMPLETE:
04787     cur_hook_id = TS_HTTP_CACHE_LOOKUP_COMPLETE_HOOK;
04788     break;
04789   case HttpTransact::SM_ACTION_API_READ_RESPONSE_HDR:
04790     cur_hook_id = TS_HTTP_READ_RESPONSE_HDR_HOOK;
04791     break;
04792   case HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR:
04793     cur_hook_id = TS_HTTP_SEND_RESPONSE_HDR_HOOK;
04794     milestones.ua_begin_write = ink_get_hrtime();
04795     break;
04796   case HttpTransact::SM_ACTION_API_SM_SHUTDOWN:
04797     if (callout_state == HTTP_API_IN_CALLOUT || callout_state == HTTP_API_DEFERED_SERVER_ERROR) {
04798       callout_state = HTTP_API_DEFERED_CLOSE;
04799       return;
04800     } else {
04801       cur_hook_id = TS_HTTP_TXN_CLOSE_HOOK;
04802     }
04803 
04804     break;
04805   default:
04806     cur_hook_id = (TSHttpHookID) - 1;
04807     ink_assert(!"not reached");
04808   }
04809 
04810   cur_hook = NULL;
04811   cur_hooks = 0;
04812   state_api_callout(0, NULL);
04813 }
04814 
04815 VConnection *
04816 HttpSM::do_post_transform_open()
04817 {
04818   ink_assert(post_transform_info.vc == NULL);
04819 
04820   if (is_action_tag_set("http_post_nullt")) {
04821     txn_hook_prepend(TS_HTTP_REQUEST_TRANSFORM_HOOK, transformProcessor.null_transform(mutex));
04822   }
04823 
04824   post_transform_info.vc = transformProcessor.open(this, api_hooks.get(TS_HTTP_REQUEST_TRANSFORM_HOOK));
04825   if (post_transform_info.vc) {
04826     // Record the transform VC in our table
04827     post_transform_info.entry = vc_table.new_entry();
04828     post_transform_info.entry->vc = post_transform_info.vc;
04829     post_transform_info.entry->vc_type = HTTP_TRANSFORM_VC;
04830   }
04831 
04832   return post_transform_info.vc;
04833 }
04834 
04835 VConnection *
04836 HttpSM::do_transform_open()
04837 {
04838   ink_assert(transform_info.vc == NULL);
04839   APIHook *hooks;
04840 
04841   if (is_action_tag_set("http_nullt")) {
04842     txn_hook_prepend(TS_HTTP_RESPONSE_TRANSFORM_HOOK, transformProcessor.null_transform(mutex));
04843   }
04844 
04845   hooks = api_hooks.get(TS_HTTP_RESPONSE_TRANSFORM_HOOK);
04846   if (hooks) {
04847     transform_info.vc = transformProcessor.open(this, hooks);
04848 
04849     // Record the transform VC in our table
04850     transform_info.entry = vc_table.new_entry();
04851     transform_info.entry->vc = transform_info.vc;
04852     transform_info.entry->vc_type = HTTP_TRANSFORM_VC;
04853   } else {
04854     transform_info.vc = NULL;
04855   }
04856 
04857   return transform_info.vc;
04858 }
04859 
04860 void
04861 HttpSM::mark_host_failure(HostDBInfo * info, time_t time_down)
04862 {
04863   char addrbuf[INET6_ADDRPORTSTRLEN];
04864 
04865   if (info->app.http_data.last_failure == 0) {
04866     char *url_str = t_state.hdr_info.client_request.url_string_get(&t_state.arena, 0);
04867     Log::error("CONNECT: could not connect to %s "
04868                "for '%s' (setting last failure time)",
04869                ats_ip_ntop(&t_state.current.server->addr.sa, addrbuf, sizeof(addrbuf)),
04870                url_str ? url_str : "<none>"
04871     );
04872     if (url_str)
04873       t_state.arena.str_free(url_str);
04874   }
04875 
04876   info->app.http_data.last_failure = time_down;
04877 
04878 
04879 #ifdef DEBUG
04880   ink_assert(ink_cluster_time() + t_state.txn_conf->down_server_timeout > time_down);
04881 #endif
04882 
04883   DebugSM("http", "[%" PRId64 "] hostdb update marking IP: %s as down",
04884         sm_id,
04885         ats_ip_nptop(&t_state.current.server->addr.sa, addrbuf, sizeof(addrbuf)));
04886 }
04887 
04888 void
04889 HttpSM::set_ua_abort(HttpTransact::AbortState_t ua_abort, int event)
04890 {
04891   t_state.client_info.abort = ua_abort;
04892 
04893   switch (ua_abort) {
04894   case HttpTransact::ABORTED:
04895   case HttpTransact::MAYBE_ABORTED:
04896     t_state.squid_codes.log_code = SQUID_LOG_ERR_CLIENT_ABORT;
04897     break;
04898   default:
04899     // Handled here:
04900     // HttpTransact::ABORT_UNDEFINED, HttpTransact::DIDNOT_ABORT
04901     break;
04902   }
04903 
04904   // Set the connection attribute code for the client so that
04905   //   we log the client finish code correctly
04906   switch (event) {
04907   case VC_EVENT_ACTIVE_TIMEOUT:
04908     t_state.client_info.state = HttpTransact::ACTIVE_TIMEOUT;
04909     break;
04910   case VC_EVENT_INACTIVITY_TIMEOUT:
04911     t_state.client_info.state = HttpTransact::INACTIVE_TIMEOUT;
04912     break;
04913   case VC_EVENT_ERROR:
04914     t_state.client_info.state = HttpTransact::CONNECTION_ERROR;
04915     break;
04916   }
04917 }
04918 
04919 void
04920 HttpSM::mark_server_down_on_client_abort()
04921 {
04922   /////////////////////////////////////////////////////
04923   //  Check to see if the client aborted because the //
04924   //  origin server was too slow in sending the      //
04925   //  response header.  If so, mark that             //
04926   //  server as down so other clients won't try it   //
04927   //  for revalidation or select it from a round     //
04928   //  robin set                                      //
04929   //                                                 //
04930   //  Note: we do not want to mark parent or icp     //
04931   //  proxies as down with this metric because       //
04932   //  that upstream proxy may be working but         //
04933   //  the actual origin server is one that is hung   //
04934   /////////////////////////////////////////////////////
04935   if (t_state.current.request_to == HttpTransact::ORIGIN_SERVER && t_state.hdr_info.request_content_length == 0) {
04936     if (milestones.server_first_connect != 0 && milestones.server_first_read == 0) {
04937       // Check to see if client waited for the threshold
04938       //  to declare the origin server as down
04939       ink_hrtime wait = ink_get_hrtime() - milestones.server_first_connect;
04940       if (wait < 0) {
04941         wait = 0;
04942       }
04943       if (ink_hrtime_to_sec(wait) > t_state.txn_conf->client_abort_threshold) {
04944         t_state.current.server->set_connect_fail(ETIMEDOUT);
04945         do_hostdb_update_if_necessary();
04946       }
04947     }
04948   }
04949 }
04950 
04951 // void HttpSM::release_server_session()
04952 //
04953 //  Called when we are not tunneling a response from the
04954 //   server.  If the session is keep alive, release it back to the
04955 //   shared pool, otherwise close it
04956 //
04957 void
04958 HttpSM::release_server_session(bool serve_from_cache)
04959 {
04960   if (server_session == NULL) {
04961     return;
04962   }
04963 
04964   if (TS_SERVER_SESSION_SHARING_MATCH_NONE != t_state.txn_conf->server_session_sharing_match &&
04965       t_state.current.server->keep_alive == HTTP_KEEPALIVE &&
04966       t_state.hdr_info.server_response.valid() &&
04967       (t_state.hdr_info.server_response.status_get() == HTTP_STATUS_NOT_MODIFIED ||
04968        (t_state.hdr_info.server_request.method_get_wksidx() == HTTP_WKSIDX_HEAD
04969         && t_state.www_auth_content != HttpTransact::CACHE_AUTH_NONE)) &&
04970       plugin_tunnel_type == HTTP_NO_PLUGIN_TUNNEL) {
04971     HTTP_DECREMENT_DYN_STAT(http_current_server_transactions_stat);
04972     server_session->server_trans_stat--;
04973     server_session->attach_hostname(t_state.current.server->name);
04974     if (t_state.www_auth_content == HttpTransact::CACHE_AUTH_NONE || serve_from_cache == false) {
04975       server_session->release();
04976     } else {
04977       // an authenticated server connection - attach it to the local client;
04978       // we are serving from cache for the current transaction
04979       t_state.www_auth_content = HttpTransact::CACHE_AUTH_SERVE;
04980       ua_session->attach_server_session(server_session, false);
04981     }
04982   } else {
04983     server_session->do_io_close();
04984   }
04985 
04986   ink_assert(server_entry->vc == server_session);
04987   server_entry->in_tunnel = true;
04988   vc_table.cleanup_entry(server_entry);
04989   server_entry = NULL;
04990   server_session = NULL;
04991 }
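// Note: roughly, the session is returned to the shared pool only when session
// sharing is enabled, the origin connection is keep-alive, the response is a
// 304 (or a HEAD response tied to cached authorization content), and no plugin
// tunnel is in use.  Within that case an authenticated session that is being
// used to serve from cache is re-attached to the client session instead of
// being pooled; every other case simply closes the connection.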
04992 
04993 // void HttpSM::handle_post_failure()
04994 //
04995 //   We failed in our attempt to post (or put) a document
04996 //    to the server.  Two cases happen here.  The normal
04997 //    one is the server died, in which case we ought to
04998 //    return an error to the client.  The second one is
04999 //    stupid.  The server returned a response without reading
05000 //    all the post data.  In order to be as transparent as
05001 //    possible, process the server's response
05002 void
05003 HttpSM::handle_post_failure()
05004 {
05005   STATE_ENTER(&HttpSM::handle_post_failure, VC_EVENT_NONE);
05006 
05007   ink_assert(ua_entry->vc == ua_session);
05008   ink_assert(server_entry->eos == true);
05009 
05010   // First order of business is to clean up from
05011   //  the tunnel
05012   // note: since the tunnel is providing the buffer for a lingering
05013   // client read (for abort watching purposes), we need to stop
05014   // the read
05015   if (false == t_state.redirect_info.redirect_in_process) {
05016     ua_entry->read_vio = ua_session->do_io_read(this, 0, NULL);
05017   }
05018   ua_entry->in_tunnel = false;
05019   server_entry->in_tunnel = false;
05020 
05021   // disable redirection in case we got a partial response and then EOS, because the buffer might not
05022   // have the full post and we are deallocating the post buffers here
05023   enable_redirection = false;
05024   tunnel.deallocate_redirect_postdata_buffers();
05025 
05026   // Don't even think about doing keep-alive after this debacle
05027   t_state.client_info.keep_alive = HTTP_NO_KEEPALIVE;
05028   t_state.current.server->keep_alive = HTTP_NO_KEEPALIVE;
05029 
05030   if (server_buffer_reader->read_avail() > 0) {
05031     tunnel.reset();
05032     // There's data from the server so try to read the header
05033     setup_server_read_response_header();
05034   } else {
05035     tunnel.deallocate_buffers();
05036     tunnel.reset();
05037     // Server died
05038     vc_table.cleanup_entry(server_entry);
05039     server_entry = NULL;
05040     server_session = NULL;
05041     t_state.current.state = HttpTransact::CONNECTION_CLOSED;
05042     call_transact_and_set_next_state(HttpTransact::HandleResponse);
05043   }
05044 }
05045 
05046 // void HttpSM::handle_http_server_open()
05047 //
05048 //   The server connection is now open.  If there is a POST or PUT,
05049 //    we need to set up a transform if there is one; otherwise we need
05050 //    to send the request header
05051 //
05052 void
05053 HttpSM::handle_http_server_open()
05054 {
05055   // [bwyatt] applying per-transaction OS netVC options here
05056   //          IFF they differ from the netVC's current options.
05057   //          This should keep it from being redundant on a
05058   //          server session's first transaction.
05059   if (NULL != server_session) {
05060     NetVConnection *vc = server_session->get_netvc();
05061     if (vc != NULL &&
05062         (vc->options.sockopt_flags != t_state.txn_conf->sock_option_flag_out ||
05063          vc->options.packet_mark != t_state.txn_conf->sock_packet_mark_out ||
05064          vc->options.packet_tos != t_state.txn_conf->sock_packet_tos_out )) {
05065       vc->options.sockopt_flags = t_state.txn_conf->sock_option_flag_out;
05066       vc->options.packet_mark = t_state.txn_conf->sock_packet_mark_out;
05067       vc->options.packet_tos = t_state.txn_conf->sock_packet_tos_out;
05068       vc->apply_options();
05069     }
05070   }
05071 
05072   if (t_state.pCongestionEntry != NULL) {
05073     if (t_state.congestion_connection_opened == 0) {
05074       t_state.congestion_connection_opened = 1;
05075       t_state.pCongestionEntry->connection_opened();
05076     }
05077   }
05078 
05079   int method = t_state.hdr_info.server_request.method_get_wksidx();
05080   if (method != HTTP_WKSIDX_TRACE &&
05081       (t_state.hdr_info.request_content_length > 0 || t_state.client_info.transfer_encoding == HttpTransact::CHUNKED_ENCODING) &&
05082        do_post_transform_open()) {
05083     do_setup_post_tunnel(HTTP_TRANSFORM_VC);
05084   } else {
05085     setup_server_send_request_api();
05086   }
05087 }
05088 
05089 // void HttpSM::handle_server_setup_error(int event, void* data)
05090 //
05091 //   Handles setting t_state.current.state and calling
05092 //    Transact in between opening an origin server connection
05093 //    and receiving the response header.  (In the case of a
05094 //    POST, a post tunnel happens in between sending the
05095 //    request header and reading the response header.)
05096 //
05097 void
05098 HttpSM::handle_server_setup_error(int event, void *data)
05099 {
05100   VIO *vio = (VIO *) data;
05101   ink_assert(vio != NULL);
05102 
05103   STATE_ENTER(&HttpSM::handle_server_setup_error, event);
05104 
05105   // If there is a POST or PUT tunnel, wait for the tunnel
05106   //  to figure out that things have gone to hell
05107 
05108   if (tunnel.is_tunnel_active()) {
05109     ink_assert(server_entry->read_vio == data);
05110     DebugSM("http", "[%" PRId64 "] [handle_server_setup_error] "
05111           "forwarding event %s to post tunnel", sm_id, HttpDebugNames::get_event_name(event));
05112     HttpTunnelConsumer *c = tunnel.get_consumer(server_entry->vc);
05113     // It is possible that only the user agent post->post transform is set up.
05114     // This happened with the Linux iocore where NET_EVENT_OPEN was returned
05115     // for a non-existent listening port. The hack is to pass the error
05116     // event for the server connection to post_transform_info
05117     if (c == NULL && post_transform_info.vc) {
05118       c = tunnel.get_consumer(post_transform_info.vc);
05119       // c->handler_state = HTTP_SM_TRANSFORM_FAIL;
05120 
05121       HttpTunnelProducer *ua_producer = c->producer;
05122       ink_assert(ua_entry->vc == ua_producer->vc);
05123 
05124       ua_entry->vc_handler = &HttpSM::state_watch_for_client_abort;
05125       ua_entry->read_vio = ua_producer->vc->do_io_read(this, INT64_MAX, c->producer->read_buffer);
05126       ua_producer->vc->do_io_shutdown(IO_SHUTDOWN_READ);
05127 
05128       ua_producer->alive = false;
05129       ua_producer->handler_state = HTTP_SM_POST_SERVER_FAIL;
05130       tunnel.handleEvent(VC_EVENT_ERROR, c->write_vio);
05131     } else {
05132       tunnel.handleEvent(event, c->write_vio);
05133     }
05134     return;
05135   } else {
05136     if (post_transform_info.vc) {
05137       HttpTunnelConsumer *c = tunnel.get_consumer(post_transform_info.vc);
05138       if (c && c->handler_state == HTTP_SM_TRANSFORM_OPEN) {
05139         vc_table.cleanup_entry(post_transform_info.entry);
05140         post_transform_info.entry = NULL;
05141         tunnel.deallocate_buffers();
05142         tunnel.reset();
05143       }
05144     }
05145   }
05146 
05147   if (event == VC_EVENT_ERROR) {
05148     t_state.cause_of_death_errno = server_session->get_netvc()->lerrno;
05149   }
05150 
05151   switch (event) {
05152   case VC_EVENT_EOS:
05153     t_state.current.state = HttpTransact::CONNECTION_CLOSED;
05154     break;
05155   case VC_EVENT_ERROR:
05156     t_state.current.state = HttpTransact::CONNECTION_ERROR;
05157     break;
05158   case VC_EVENT_ACTIVE_TIMEOUT:
05159     t_state.current.state = HttpTransact::ACTIVE_TIMEOUT;
05160     break;
05161 
05162   case VC_EVENT_INACTIVITY_TIMEOUT:
05163     // If we're writing the request and get an inactivity timeout
05164     //   before any bytes are written, the connection to the
05165     //   server failed
05166     // In case of TIMEOUT, the iocore sends back
05167     // server_entry->read_vio instead of the write_vio
05168     // if (vio->op == VIO::WRITE && vio->ndone == 0) {
05169     if (server_entry->write_vio->nbytes > 0 && server_entry->write_vio->ndone == 0) {
05170       t_state.current.state = HttpTransact::CONNECTION_ERROR;
05171     } else {
05172       t_state.current.state = HttpTransact::INACTIVE_TIMEOUT;
05173     }
05174     break;
05175   default:
05176     ink_release_assert(0);
05177   }
05178 
05179   // Closedown server connection and deallocate buffers
05180   ink_assert(server_entry->in_tunnel == false);
05181   vc_table.cleanup_entry(server_entry);
05182   server_entry = NULL;
05183   server_session = NULL;
05184 
05185   // if we are waiting on a plugin callout for
05186   //   HTTP_API_SEND_REQUEST_HDR defer calling transact until
05187   //   after we've finished processing the plugin callout
05188   switch (callout_state) {
05189   case HTTP_API_NO_CALLOUT:
05190     // Normal fast path case, no api callouts in progress
05191     break;
05192   case HTTP_API_IN_CALLOUT:
05193   case HTTP_API_DEFERED_SERVER_ERROR:
05194     // Callout in progress; note that we are deferring
05195     //   the server error
05196     callout_state = HTTP_API_DEFERED_SERVER_ERROR;
05197     return;
05198   case HTTP_API_DEFERED_CLOSE:
05199     // The user agent has shut down, killing the sm,
05200     //   but we are stuck waiting for the server callout
05201     //   to finish so do nothing here.  We don't care
05202     //   about the server connection at this point and are
05203     //   just waiting till we can execute the close hook
05204     return;
05205   default:
05206     ink_release_assert(0);
05207   }
05208 
05209   call_transact_and_set_next_state(HttpTransact::HandleResponse);
05210 }
05211 
05212 void
05213 HttpSM::setup_transform_to_server_transfer()
05214 {
05215   ink_assert(post_transform_info.vc != NULL);
05216   ink_assert(post_transform_info.entry->vc == post_transform_info.vc);
05217 
05218   int64_t nbytes = t_state.hdr_info.transform_request_cl;
05219   int64_t alloc_index = buffer_size_to_index(nbytes);
05220   MIOBuffer *post_buffer = new_MIOBuffer(alloc_index);
05221   IOBufferReader *buf_start = post_buffer->alloc_reader();
05222 
05223   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler_post);
05224 
05225   HttpTunnelConsumer *c = tunnel.get_consumer(post_transform_info.vc);
05226 
05227   HttpTunnelProducer *p = tunnel.add_producer(post_transform_info.vc,
05228                                               nbytes,
05229                                               buf_start,
05230                                               &HttpSM::tunnel_handler_transform_read,
05231                                               HT_TRANSFORM,
05232                                               "post transform");
05233   tunnel.chain(c,p);
05234   post_transform_info.entry->in_tunnel = true;
05235 
05236   tunnel.add_consumer(server_entry->vc,
05237                       post_transform_info.vc, &HttpSM::tunnel_handler_post_server, HT_HTTP_SERVER, "http server post");
05238   server_entry->in_tunnel = true;
05239 
05240   tunnel.tunnel_run(p);
05241 }
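// Note: this chains the existing post tunnel into the request transform -- the
// transform VC becomes a producer feeding a new consumer on the origin server
// VC, completing the UA -> transform -> server leg started in
// do_setup_post_tunnel().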
05242 
05243 #ifdef PROXY_DRAIN
05244 void
05245 HttpSM::do_drain_request_body()
05246 {
05247   int64_t post_bytes = t_state.hdr_info.request_content_length;
05248   int64_t avail = ua_buffer_reader->read_avail();
05249 
05250   int64_t act_on = (avail < post_bytes) ? avail : post_bytes;
05251 
05252   client_request_body_bytes = act_on;
05253   ua_buffer_reader->consume(act_on);
05254 
05255   ink_assert(client_request_body_bytes <= post_bytes);
05256 
05257   if (client_request_body_bytes < post_bytes) {
05258     ua_buffer_reader->mbuf->size_index = buffer_size_to_index(t_state.hdr_info.request_content_length);
05259     ua_entry->vc_handler = &HttpSM::state_drain_client_request_body;
05260     ua_entry->read_vio = ua_entry->vc->do_io_read(this, post_bytes - client_request_body_bytes, ua_buffer_reader->mbuf);
05261   } else {
05262     call_transact_and_set_next_state(NULL);
05263   }
05264 }
05265 #endif /* PROXY_DRAIN */
05266 
05267 void
05268 HttpSM::do_setup_post_tunnel(HttpVC_t to_vc_type)
05269 {
05270   bool chunked = (t_state.client_info.transfer_encoding == HttpTransact::CHUNKED_ENCODING);
05271   bool post_redirect = false;
05272 
05273   HttpTunnelProducer *p = NULL;
05274   // YTS Team, yamsat Plugin
05275   // if redirect_in_process is true and redirection is enabled, add a static producer
05276 
05277   if (t_state.redirect_info.redirect_in_process && enable_redirection &&
05278       (tunnel.postbuf && tunnel.postbuf->postdata_copy_buffer_start != NULL &&
05279        tunnel.postbuf->postdata_producer_buffer != NULL)) {
05280     post_redirect = true;
05281     //copy the post data into a new producer buffer for static producer
05282     tunnel.postbuf->postdata_producer_buffer->write(tunnel.postbuf->postdata_copy_buffer_start);
05283     int64_t post_bytes = tunnel.postbuf->postdata_producer_reader->read_avail();
05284     transfered_bytes = post_bytes;
05285     p = tunnel.add_producer(HTTP_TUNNEL_STATIC_PRODUCER,
05286                             post_bytes,
05287                             tunnel.postbuf->postdata_producer_reader,
05288                             (HttpProducerHandler) NULL, HT_STATIC, "redirect static agent post");
05289     // the tunnel has taken over the buffer and will free it
05290     tunnel.postbuf->postdata_producer_buffer = NULL;
05291     tunnel.postbuf->postdata_producer_reader = NULL;
05292   } else {
05293     int64_t alloc_index;
05294     // content length is undefined, use default buffer size
05295     if (t_state.hdr_info.request_content_length == HTTP_UNDEFINED_CL) {
05296       alloc_index = (int) t_state.txn_conf->default_buffer_size_index;
05297       if (alloc_index<MIN_CONFIG_BUFFER_SIZE_INDEX || alloc_index> MAX_BUFFER_SIZE_INDEX) {
05298         alloc_index = DEFAULT_REQUEST_BUFFER_SIZE_INDEX;
05299       }
05300     } else {
05301       alloc_index = buffer_size_to_index(t_state.hdr_info.request_content_length);
05302     }
05303     MIOBuffer *post_buffer = new_MIOBuffer(alloc_index);
05304     IOBufferReader *buf_start = post_buffer->alloc_reader();
05305     int64_t post_bytes = chunked ? INT64_MAX : t_state.hdr_info.request_content_length;
05306 
05307     t_state.hdr_info.request_body_start = true;
05308     // Note: Many browsers, Netscape and IE included, send two extra
05309     //  bytes (CRLF) at the end of the post.  We just ignore those
05310     //  bytes since sending them is not in the spec
05311 
05312     // Next order of business is to copy the remaining data from the
05313     //  header buffer into the new buffer
05314     //
05315 
05316     client_request_body_bytes = post_buffer->write(ua_buffer_reader, chunked ? ua_buffer_reader->read_avail() : post_bytes);
05317     ua_buffer_reader->consume(client_request_body_bytes);
05318     p = tunnel.add_producer(ua_entry->vc, post_bytes - transfered_bytes, buf_start,
05319                             &HttpSM::tunnel_handler_post_ua, HT_HTTP_CLIENT, "user agent post");
05320   }
05321   ua_entry->in_tunnel = true;
05322 
05323   switch (to_vc_type) {
05324   case HTTP_TRANSFORM_VC:
05325     HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_request_wait_for_transform_read);
05326     ink_assert(post_transform_info.entry != NULL);
05327     ink_assert(post_transform_info.entry->vc == post_transform_info.vc);
05328     tunnel.add_consumer(post_transform_info.entry->vc,
05329                         ua_entry->vc, &HttpSM::tunnel_handler_transform_write, HT_TRANSFORM, "post transform");
05330     post_transform_info.entry->in_tunnel = true;
05331     break;
05332   case HTTP_SERVER_VC:
05333     // YTS Team, yamsat Plugin
05334     // When redirect_in_process is true and redirection is enabled,
05335     // add the http server as the consumer
05336     if (post_redirect) {
05337       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler_for_partial_post);
05338       tunnel.add_consumer(server_entry->vc,
05339                           HTTP_TUNNEL_STATIC_PRODUCER,
05340                           &HttpSM::tunnel_handler_post_server, HT_HTTP_SERVER, "redirect http server post");
05341     } else {
05342       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler_post);
05343       tunnel.add_consumer(server_entry->vc,
05344                           ua_entry->vc, &HttpSM::tunnel_handler_post_server, HT_HTTP_SERVER, "http server post");
05345     }
05346     server_entry->in_tunnel = true;
05347     break;
05348   default:
05349     ink_release_assert(0);
05350     break;
05351   }
05352 
05353   if (chunked)
05354     tunnel.set_producer_chunking_action(p, 0, TCA_PASSTHRU_CHUNKED_CONTENT);
05355 
05356   tunnel.tunnel_run(p);
05357 }
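// Note: a sketch of the tunnel built above.  The producer is either the user
// agent VC (normal case) or a static producer replaying buffered post data
// during a redirect; the consumer added here is either the post transform VC
// or the origin server VC, so overall the body flows
//
//   UA (or buffered redirect data) -> [post transform] -> origin server
//
// with chunked request bodies passed through unchanged
// (TCA_PASSTHRU_CHUNKED_CONTENT); the transform-to-server leg is wired up
// later in setup_transform_to_server_transfer().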
05358 
05359 // void HttpSM::perform_transform_cache_write_action()
05360 //
05361 //   Called to do cache write from the transform
05362 //
05363 void
05364 HttpSM::perform_transform_cache_write_action()
05365 {
05366   DebugSM("http", "[%" PRId64 "] perform_transform_cache_write_action %s", sm_id,
05367         HttpDebugNames::get_cache_action_name(t_state.cache_info.action));
05368 
05369   if (t_state.range_setup)
05370     return;
05371 
05372   switch (t_state.cache_info.transform_action) {
05373   case HttpTransact::CACHE_DO_NO_ACTION:
05374     {
05375       // Nothing to do
05376       transform_cache_sm.end_both();
05377       break;
05378     }
05379 
05380   case HttpTransact::CACHE_DO_WRITE:
05381     {
05382       transform_cache_sm.close_read();
05383       t_state.cache_info.transform_write_status = HttpTransact::CACHE_WRITE_IN_PROGRESS;
05384       setup_cache_write_transfer(&transform_cache_sm,
05385                                  transform_info.entry->vc,
05386                                  &t_state.cache_info.transform_store, client_response_hdr_bytes, "cache write t");
05387       break;
05388     }
05389 
05390   default:
05391     ink_release_assert(0);
05392     break;
05393   }
05394 
05395 }
05396 
05397 // void HttpSM::perform_cache_write_action()
05398 //
05399 //   Called to do cache write, delete and updates based
05400 //    on s->cache_info.action.  Does not setup cache
05401 //    read tunnels
05402 //
05403 void
05404 HttpSM::perform_cache_write_action()
05405 {
05406   DebugSM("http", "[%" PRId64 "] perform_cache_write_action %s",
05407         sm_id, HttpDebugNames::get_cache_action_name(t_state.cache_info.action));
05408 
05409   switch (t_state.cache_info.action) {
05410   case HttpTransact::CACHE_DO_NO_ACTION:
05411 
05412     {
05413       // Nothing to do
05414       cache_sm.end_both();
05415       break;
05416     }
05417 
05418   case HttpTransact::CACHE_DO_SERVE:
05419     {
05420       cache_sm.abort_write();
05421       break;
05422     }
05423 
05424   case HttpTransact::CACHE_DO_DELETE:
05425     {
05426       // Write close deletes the old alternate
05427       cache_sm.close_write();
05428       cache_sm.close_read();
05429       break;
05430     }
05431 
05432   case HttpTransact::CACHE_DO_SERVE_AND_DELETE:
05433     {
05434       // FIX ME: need to set up delete for after cache write has
05435       //   completed
05436       break;
05437     }
05438 
05439   case HttpTransact::CACHE_DO_SERVE_AND_UPDATE:
05440     {
05441       issue_cache_update();
05442       break;
05443     }
05444 
05445   case HttpTransact::CACHE_DO_UPDATE:
05446     {
05447       cache_sm.close_read();
05448       issue_cache_update();
05449       break;
05450     }
05451 
05452   case HttpTransact::CACHE_DO_WRITE:
05453   case HttpTransact::CACHE_DO_REPLACE:
05454     // FIX: need to set up delete for after cache write has
05455     //   completed
05456     if (transform_info.entry == NULL || t_state.api_info.cache_untransformed == true) {
05457       cache_sm.close_read();
05458       t_state.cache_info.write_status = HttpTransact::CACHE_WRITE_IN_PROGRESS;
05459       setup_cache_write_transfer(&cache_sm,
05460                                  server_entry->vc,
05461                                  &t_state.cache_info.object_store, client_response_hdr_bytes, "cache write");
05462     } else {
05463       // We are not caching the untransformed copy.  We might want to
05464       //  use the cache write VC to cache the transformed copy
05465       ink_assert(transform_cache_sm.cache_write_vc == NULL);
05466       transform_cache_sm.cache_write_vc = cache_sm.cache_write_vc;
05467       cache_sm.cache_write_vc = NULL;
05468     }
05469     break;
05470 
05471   default:
05472     ink_release_assert(0);
05473     break;
05474   }
05475 }
05476 
05477 
05478 void
05479 HttpSM::issue_cache_update()
05480 {
05481   ink_assert(cache_sm.cache_write_vc != NULL);
05482   if (cache_sm.cache_write_vc) {
05483     t_state.cache_info.object_store.request_sent_time_set(t_state.request_sent_time);
05484     t_state.cache_info.object_store.response_received_time_set(t_state.response_received_time);
05485     ink_assert(t_state.cache_info.object_store.request_sent_time_get() > 0);
05486     ink_assert(t_state.cache_info.object_store.response_received_time_get() > 0);
05487     cache_sm.cache_write_vc->set_http_info(&t_state.cache_info.object_store);
05488     t_state.cache_info.object_store.clear();
05489   }
05490   // Now close the write which commits the update
05491   cache_sm.close_write();
05492 }
05493 
05494 int
05495 HttpSM::write_header_into_buffer(HTTPHdr * h, MIOBuffer * b)
05496 {
05497   int bufindex;
05498   int dumpoffset;
05499   int done, tmp;
05500   IOBufferBlock *block;
05501 
05502   dumpoffset = 0;
05503   do {
05504     bufindex = 0;
05505     tmp = dumpoffset;
05506     block = b->get_current_block();
05507     ink_assert(block->write_avail() > 0);
05508     done = h->print(block->start(), block->write_avail(), &bufindex, &tmp);
05509     dumpoffset += bufindex;
05510     ink_assert(bufindex > 0);
05511     b->fill(bufindex);
05512     if (!done) {
05513       b->add_block();
05514     }
05515   } while (!done);
05516 
05517   return dumpoffset;
05518 }
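// Note: the loop above repeatedly asks HTTPHdr::print() to render as much of
// the header as fits in the current IOBufferBlock, commits those bytes with
// fill(), and grows the buffer with add_block() until print() reports that it
// is done; the return value is the total number of header bytes written.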
05519 
05520 void
05521 HttpSM::attach_server_session(HttpServerSession * s)
05522 {
05523   hsm_release_assert(server_session == NULL);
05524   hsm_release_assert(server_entry == NULL);
05525   hsm_release_assert(s->state == HSS_ACTIVE);
05526   server_session = s;
05527   server_session->transact_count++;
05528 
05529   // Set the mutex so that we have something to update
05530   //   stats with
05531   server_session->mutex = this->mutex;
05532 
05533   HTTP_INCREMENT_DYN_STAT(http_current_server_transactions_stat);
05534   ++s->server_trans_stat;
05535 
05536   // Record the VC in our table
05537   server_entry = vc_table.new_entry();
05538   server_entry->vc = server_session;
05539   server_entry->vc_type = HTTP_SERVER_VC;
05540   server_entry->vc_handler = &HttpSM::state_send_server_request_header;
05541 
05542   // Initiate a read on the session so that the SM and not
05543   //  the session manager will get called back if the timeout occurs
05544   //  or the server closes on us.  The IO Core now requires us to
05545   //  do the read with a buffer and a size so preallocate the
05546   //  buffer
05547   server_buffer_reader = server_session->get_reader();
05548   server_entry->read_vio = server_session->do_io_read(this, INT64_MAX, server_session->read_buffer);
05549 
05550   // This call cannot be canceled or disabled on Windows at a different
05551   // time (callstack). After this function, all transactions will send
05552   // a request to the origin server. It is possible that read events
05553   // for the response come in before the write events for sending the
05554   // request itself. In state_send_server_request(), we try to disable
05555   // reading until writing the request has completed. That turned out to be,
05556   // for the second do_io_read(), the way to reenable() reading once
05557   // disabled, but the result of this do_io_read still came in. For this
05558   // read holds: server_entry->read_vio == INT64_MAX
05559   // This block of read events gets undone in setup_server_read_response()
05560 
05561   // Transfer control of the write side as well
05562   server_session->do_io_write(this, 0, NULL);
05563 
05564   // Setup the timeouts
05565   // Set the inactivity timeout to the connect timeout so that
05566   //   we fail this server if it doesn't start sending the response
05567   //   header
05568   MgmtInt connect_timeout;
05569 
05570   if (t_state.method == HTTP_WKSIDX_POST || t_state.method == HTTP_WKSIDX_PUT) {
05571     connect_timeout = t_state.txn_conf->post_connect_attempts_timeout;
05572   } else if (t_state.current.server == &t_state.parent_info) {
05573     connect_timeout = t_state.http_config_param->parent_connect_timeout;
05574   } else {
05575     connect_timeout = t_state.txn_conf->connect_attempts_timeout;
05576   }
05577   if (t_state.pCongestionEntry != NULL)
05578     connect_timeout = t_state.pCongestionEntry->connect_timeout();
05579 
05580   if (t_state.api_txn_connect_timeout_value != -1) {
05581     server_session->get_netvc()->set_inactivity_timeout(HRTIME_MSECONDS(t_state.api_txn_connect_timeout_value));
05582   } else {
05583     server_session->get_netvc()->set_inactivity_timeout(HRTIME_SECONDS(connect_timeout));
05584   }
05585 
05586   if (t_state.api_txn_active_timeout_value != -1) {
05587     server_session->get_netvc()->set_active_timeout(HRTIME_MSECONDS(t_state.api_txn_active_timeout_value));
05588   } else {
05589     server_session->get_netvc()->set_active_timeout(HRTIME_SECONDS(t_state.txn_conf->transaction_active_timeout_out));
05590   }
05591 
05592   if (plugin_tunnel_type != HTTP_NO_PLUGIN_TUNNEL) {
05593     DebugSM("http_ss", "Setting server session to private");
05594     server_session->private_session = true;
05595   }
05596 }
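// Note: summarizing the timeout setup above, the origin connection's
// inactivity timeout starts out as the applicable connect timeout (post/put,
// parent, congestion, or connect_attempts_timeout) and the active timeout
// comes from transaction_active_timeout_out; per-transaction values set
// through the API (api_txn_connect_timeout_value / api_txn_active_timeout_value)
// override them when not -1.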
05597 
05598 void
05599 HttpSM::setup_server_send_request_api()
05600 {
05601   t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_REQUEST_HDR;
05602   do_api_callout();
05603 }
05604 
05605 void
05606 HttpSM::setup_server_send_request()
05607 {
05608   int hdr_length;
05609   int64_t msg_len = 0;  /* lv: just make gcc happy */
05610 
05611   hsm_release_assert(server_entry != NULL);
05612   hsm_release_assert(server_session != NULL);
05613   hsm_release_assert(server_entry->vc == server_session);
05614 
05615   // Send the request header
05616   server_entry->vc_handler = &HttpSM::state_send_server_request_header;
05617   server_entry->write_buffer = new_MIOBuffer(HTTP_HEADER_BUFFER_SIZE_INDEX);
05618 
05619   if (t_state.api_server_request_body_set) {
05620     msg_len = t_state.internal_msg_buffer_size;
05621     t_state.hdr_info.server_request.value_set_int64(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH, msg_len);
05622   }
05623   DUMP_HEADER("http_hdrs", &(t_state.hdr_info.server_request), t_state.state_machine_id, "Proxy's Request after hooks");
05624   // We need a reader so bytes don't fall off the end of
05625   //  the buffer
05626   IOBufferReader *buf_start = server_entry->write_buffer->alloc_reader();
05627   server_request_hdr_bytes = hdr_length =
05628     write_header_into_buffer(&t_state.hdr_info.server_request, server_entry->write_buffer);
05629 
05630   // the plugin decided to append a message to the request
05631   if (t_state.api_server_request_body_set) {
05632     DebugSM("http", "[%" PRId64 "] appending msg of %" PRId64" bytes to request %s", sm_id, msg_len, t_state.internal_msg_buffer);
05633     hdr_length += server_entry->write_buffer->write(t_state.internal_msg_buffer, msg_len);
05634     server_request_body_bytes = msg_len;
05635   }
05636 
05637   // If we are sending authorization headers, mark the connection private
05638   if (t_state.txn_conf->auth_server_session_private == 1 &&
05639       t_state.hdr_info.server_request.presence(MIME_PRESENCE_AUTHORIZATION | MIME_PRESENCE_PROXY_AUTHORIZATION | MIME_PRESENCE_WWW_AUTHENTICATE)) {
05640       server_session->private_session = true;
05641       DebugSM("http_ss", "Setting server session to private for authorization header");
05642   }
05643   milestones.server_begin_write = ink_get_hrtime();
05644   server_entry->write_vio = server_entry->vc->do_io_write(this, hdr_length, buf_start);
05645 }
05646 
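      // HttpSM::setup_server_read_response_header()
      //
      //   Prepares to parse the origin server's response: installs
      //   state_read_server_response_header as the handler, recreates the
      //   server_response header, clears the parser, and, if response data
      //   is already buffered, calls the handler directly.  If the read VIO
      //   has been consumed it re-issues do_io_read() on the same buffer.
      //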
05647 void
05648 HttpSM::setup_server_read_response_header()
05649 {
05650   ink_assert(server_session != NULL);
05651   ink_assert(server_entry != NULL);
05652   // REQ_FLAVOR_SCHEDULED_UPDATE can be transformed into REQ_FLAVOR_REVPROXY
05653   ink_assert(ua_session != NULL ||
05654              t_state.req_flavor == HttpTransact::REQ_FLAVOR_SCHEDULED_UPDATE ||
05655              t_state.req_flavor == HttpTransact::REQ_FLAVOR_REVPROXY);
05656 
05657   // We should have set the server_buffer_reader when
05658   //   we sent the request header
05659   ink_assert(server_buffer_reader != NULL);
05660 
05661   // Now that we've got the ability to read from the
05662   //  server, setup to read the response header
05663   server_entry->vc_handler = &HttpSM::state_read_server_response_header;
05664 
05665   t_state.current.state = HttpTransact::STATE_UNDEFINED;
05666   t_state.current.server->state = HttpTransact::STATE_UNDEFINED;
05667 
05668   // Note: we must use destroy() here since clear()
05669   //  does not free the memory from the header
05670   t_state.hdr_info.server_response.destroy();
05671   t_state.hdr_info.server_response.create(HTTP_TYPE_RESPONSE);
05672   http_parser_clear(&http_parser);
05673   server_response_hdr_bytes = 0;
05674   milestones.server_read_header_done = 0;
05675 
05676   // We already did the READ when we set up the connection to
05677   //   read the request header
05678   ink_assert(server_entry->read_vio);
05679 
05680   // If there is anything in the buffer call the parsing routines
05681   //  since if the response is finished, we won't get any
05682   //  additional callbacks
05683 
05684   //UnixNetVConnection * vc = (UnixNetVConnection*)(ua_session->client_vc);
05685   //UnixNetVConnection * my_server_vc = (UnixNetVConnection*)(server_session->get_netvc());
05686   if (server_buffer_reader->read_avail() > 0) {
05687     if (server_entry->eos) {
05688       /*printf("data already in the buffer, calling state_read_server_response_header with VC_EVENT_EOS client fd: %d and server fd : %d\n",
05689          vc->con.fd,my_server_vc->con.fd); */
05690       state_read_server_response_header(VC_EVENT_EOS, server_entry->read_vio);
05691     } else {
05692       /*printf("data already in the buffer, calling state_read_server_response_header with VC_EVENT_READ_READY fd: %d and server fd : %d\n",
05693          vc->con.fd,my_server_vc->con.fd); */
05694       state_read_server_response_header(VC_EVENT_READ_READY, server_entry->read_vio);
05695     }
05696   }
05697   // It is possible the header was already in the buffer and the
05698   //   read IO was disabled.  This would happen if 1) the response
05699   //   header was received before the 'request sent' callback happened
05700   //   (or before a post body was sent) OR 2) we were parsing a 100
05701   //   continue response and are now parsing the next response header.
05702   //   If only part of the header was in the buffer we need to do more IO
05703   //   to get the rest of it.  If the whole header is in the buffer then we don't
05704   //   want additional IO since we'll be issuing another read for the body tunnel
05705   //   and can't switch buffers at that point since we won't be on the read
05706   //   callback.
05707   if (server_entry != NULL) {
05708     if (t_state.current.server->state == HttpTransact::STATE_UNDEFINED &&
05709         server_entry->read_vio->nbytes == server_entry->read_vio->ndone &&
05710         milestones.server_read_header_done == 0) {
05711       ink_assert(server_entry->eos == false);
05712       server_entry->read_vio = server_session->do_io_read(this, INT64_MAX, server_buffer_reader->mbuf);
05713     }
05714   }
05715 }
05716 
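      // HttpSM::setup_cache_read_transfer()
      //
      //   Sets up the tunnel for a cache hit: allocates a buffer sized for
      //   the cached object plus header space, writes the client response
      //   header into it, and adds the cache read VC as producer with the
      //   user agent as consumer, chunking the body if the client accepts
      //   a chunked response.
      //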
05717 void
05718 HttpSM::setup_cache_read_transfer()
05719 {
05720   int64_t alloc_index, hdr_size;
05721   int64_t doc_size;
05722 
05723   ink_assert(cache_sm.cache_read_vc != NULL);
05724 
05725   doc_size = t_state.cache_info.object_read->object_size_get();
05726   alloc_index = buffer_size_to_index(doc_size + index_to_buffer_size(HTTP_HEADER_BUFFER_SIZE_INDEX));
05727 
05728 #ifndef USE_NEW_EMPTY_MIOBUFFER
05729   MIOBuffer *buf = new_MIOBuffer(alloc_index);
05730 #else
05731   MIOBuffer *buf = new_empty_MIOBuffer(alloc_index);
05732   buf->append_block(HTTP_HEADER_BUFFER_SIZE_INDEX);
05733 #endif
05734 
05735   buf->water_mark = (int) t_state.txn_conf->default_buffer_water_mark;
05736 
05737   IOBufferReader *buf_start = buf->alloc_reader();
05738 
05739   // Now dump the header into the buffer
05740   ink_assert(t_state.hdr_info.client_response.status_get() != HTTP_STATUS_NOT_MODIFIED);
05741   client_response_hdr_bytes = hdr_size = write_response_header_into_buffer(&t_state.hdr_info.client_response, buf);
05742   cache_response_hdr_bytes = client_response_hdr_bytes;
05743 
05744 
05745   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler);
05746 
05747   if (doc_size != INT64_MAX)
05748     doc_size += hdr_size;
05749 
05750   HttpTunnelProducer *p = tunnel.add_producer(cache_sm.cache_read_vc,
05751                                               doc_size, buf_start, &HttpSM::tunnel_handler_cache_read, HT_CACHE_READ,
05752                                               "cache read");
05753   tunnel.add_consumer(ua_entry->vc, cache_sm.cache_read_vc, &HttpSM::tunnel_handler_ua, HT_HTTP_CLIENT, "user agent");
05754   // If the size of a cached item is not known, we'll do chunking for keep-alive HTTP/1.1 clients.
05755   // This only applies to read-while-write cases where the origin server sends dynamically generated chunked content
05756   // without providing a Content-Length header.
05757   if ( t_state.client_info.receive_chunked_response ) {
05758     tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, TCA_CHUNK_CONTENT);
05759     tunnel.set_producer_chunking_size(p, t_state.txn_conf->http_chunking_size);
05760   }
05761   ua_entry->in_tunnel = true;
05762   cache_sm.cache_read_vc = NULL;
05763 }
05764 
05765 HttpTunnelProducer *
05766 HttpSM::setup_cache_transfer_to_transform()
05767 {
05768   int64_t alloc_index;
05769   int64_t doc_size;
05770 
05771   ink_assert(cache_sm.cache_read_vc != NULL);
05772   ink_assert(transform_info.vc != NULL);
05773   ink_assert(transform_info.entry->vc == transform_info.vc);
05774 
05775   // grab this here
05776   cache_response_hdr_bytes = t_state.hdr_info.cache_response.length_get();
05777 
05778   doc_size = t_state.cache_info.object_read->object_size_get();
05779   alloc_index = buffer_size_to_index(doc_size);
05780   MIOBuffer *buf = new_MIOBuffer(alloc_index);
05781   IOBufferReader *buf_start = buf->alloc_reader();
05782 
05783 
05784   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_response_wait_for_transform_read);
05785 
05786   HttpTunnelProducer *p = tunnel.add_producer(cache_sm.cache_read_vc,
05787                                               doc_size,
05788                                               buf_start,
05789                                               &HttpSM::tunnel_handler_cache_read,
05790                                               HT_CACHE_READ,
05791                                               "cache read");
05792 
05793   tunnel.add_consumer(transform_info.vc,
05794                       cache_sm.cache_read_vc, &HttpSM::tunnel_handler_transform_write, HT_TRANSFORM, "transform write");
05795   transform_info.entry->in_tunnel = true;
05796   cache_sm.cache_read_vc = NULL;
05797 
05798   return p;
05799 }
05800 
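      // HttpSM::setup_cache_write_transfer()
      //
      //   Stamps the request-sent and response-received times on the
      //   HTTPInfo, hands it to the cache write VC, and registers that VC
      //   as a tunnel consumer of the given source VC with the supplied
      //   skip_bytes.
      //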
05801 void
05802 HttpSM::setup_cache_write_transfer(HttpCacheSM * c_sm,
05803                                    VConnection * source_vc, HTTPInfo * store_info, int64_t skip_bytes, const char *name)
05804 {
05805   ink_assert(c_sm->cache_write_vc != NULL);
05806   ink_assert(t_state.request_sent_time > 0);
05807   ink_assert(t_state.response_received_time > 0);
05808 
05809   store_info->request_sent_time_set(t_state.request_sent_time);
05810   store_info->response_received_time_set(t_state.response_received_time);
05811 
05812   c_sm->cache_write_vc->set_http_info(store_info);
05813   store_info->clear();
05814 
05815   tunnel.add_consumer(c_sm->cache_write_vc,
05816                       source_vc, &HttpSM::tunnel_handler_cache_write, HT_CACHE_WRITE, name, skip_bytes);
05817 
05818   c_sm->cache_write_vc = NULL;
05819 }
05820 
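      // HttpSM::setup_100_continue_transfer()
      //
      //   Sends the buffered 100 Continue response to the client: writes
      //   the client response header into a small buffer, adds it as a
      //   static tunnel producer with the user agent as consumer, and runs
      //   the tunnel.
      //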
05821 void
05822 HttpSM::setup_100_continue_transfer()
05823 {
05824   MIOBuffer *buf = new_MIOBuffer(HTTP_HEADER_BUFFER_SIZE_INDEX);
05825   IOBufferReader *buf_start = buf->alloc_reader();
05826 
05827   // First write the client response header into the buffer
05828   ink_assert(t_state.client_info.http_version != HTTPVersion(0, 9));
05829   client_response_hdr_bytes = write_header_into_buffer(&t_state.hdr_info.client_response, buf);
05830   ink_assert(client_response_hdr_bytes > 0);
05831 
05832   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler_100_continue);
05833 
05834   // Setup the tunnel to the client
05835   tunnel.add_producer(HTTP_TUNNEL_STATIC_PRODUCER,
05836                       client_response_hdr_bytes,
05837                       buf_start, (HttpProducerHandler) NULL, HT_STATIC, "internal msg - 100 continue");
05838   tunnel.add_consumer(ua_entry->vc,
05839                       HTTP_TUNNEL_STATIC_PRODUCER,
05840                       &HttpSM::tunnel_handler_100_continue_ua, HT_HTTP_CLIENT, "user agent");
05841 
05842   ua_entry->in_tunnel = true;
05843   tunnel.tunnel_run();
05844 }
05845 
05846 //////////////////////////////////////////////////////////////////////////
05847 //
05848 //  HttpSM::setup_error_transfer()
05849 //
05850 //  The proxy has generated an error message which it
05851 //  is sending to the client. For some cases, however,
05852 //  such as when the proxy is transparent, returning
05853 //  a proxy-generated error message exposes the proxy,
05854 //  destroying transparency. The HttpBodyFactory code,
05855 //  therefore, does not generate an error message body
05856 //  in such cases. This function checks for the presence
05857   //  of an error body. If it's not present, it closes the
05858 //  connection to the user, else it simply calls
05859 //  setup_write_proxy_internal, which is the standard
05860 //  routine for setting up proxy-generated responses.
05861 //
05862 //////////////////////////////////////////////////////////////////////////
05863 void
05864 HttpSM::setup_error_transfer()
05865 {
05866   if (t_state.internal_msg_buffer) {
05867     // Since we need to send the error message, call the API
05868     //   function
05869     ink_assert(t_state.internal_msg_buffer_size > 0);
05870     t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
05871     do_api_callout();
05872   } else {
05873     DebugSM("http", "[setup_error_transfer] Now closing connection ...");
05874     vc_table.cleanup_entry(ua_entry);
05875     ua_entry = NULL;
05876     ua_session = NULL;
05877     terminate_sm = true;
05878     t_state.source = HttpTransact::SOURCE_INTERNAL;
05879   }
05880 }
05881 
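      // HttpSM::setup_internal_transfer()
      //
      //   Sends a proxy-generated response (error page or other internal
      //   message) to the client.  Sets Content-Type/Content-Length as
      //   needed, writes the response header and, except for HEAD requests,
      //   the internal message body into a buffer, then tunnels it to the
      //   user agent using the supplied handler.
      //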
05882 void
05883 HttpSM::setup_internal_transfer(HttpSMHandler handler_arg)
05884 {
05885   bool is_msg_buf_present;
05886 
05887   if (t_state.internal_msg_buffer) {
05888     is_msg_buf_present = true;
05889     ink_assert(t_state.internal_msg_buffer_size > 0);
05890 
05891     // Set the content length here since a plugin
05892     //   may have changed the error body
05893     t_state.hdr_info.client_response.set_content_length(t_state.internal_msg_buffer_size);
05894 
05895     // set internal_msg_buffer_type if available
05896     if (t_state.internal_msg_buffer_type) {
05897       int len = strlen(t_state.internal_msg_buffer_type);
05898 
05899       if (len > 0) {
05900         t_state.hdr_info.client_response.value_set(MIME_FIELD_CONTENT_TYPE,
05901                                                    MIME_LEN_CONTENT_TYPE,
05902                                                    t_state.internal_msg_buffer_type, len);
05903       }
05904       ats_free(t_state.internal_msg_buffer_type);
05905       t_state.internal_msg_buffer_type = NULL;
05906     } else {
05907       t_state.hdr_info.client_response.value_set(MIME_FIELD_CONTENT_TYPE,
05908                                                  MIME_LEN_CONTENT_TYPE,
05909                                                  "text/html", 9);
05910     }
05911   } else {
05912     is_msg_buf_present = false;
05913 
05914     // If we are sending a response that can have a body
05915     //   but doesn't have a body, add a content-length of zero.
05916     //   Needed for keep-alive on PURGE requests
05917     if (!is_response_body_precluded(t_state.hdr_info.client_response.status_get(), t_state.method)) {
05918       t_state.hdr_info.client_response.set_content_length(0);
05919     }
05920   }
05921 
05922   t_state.source = HttpTransact::SOURCE_INTERNAL;
05923 
05924   int64_t buf_size = index_to_buffer_size(HTTP_HEADER_BUFFER_SIZE_INDEX) + (is_msg_buf_present ? t_state.internal_msg_buffer_size : 0);
05925 
05926   MIOBuffer *buf = new_MIOBuffer(buffer_size_to_index(buf_size));
05927   IOBufferReader *buf_start = buf->alloc_reader();
05928 
05929   // First write the client response header into the buffer
05930   client_response_hdr_bytes = write_response_header_into_buffer(&t_state.hdr_info.client_response, buf);
05931   int64_t nbytes = client_response_hdr_bytes;
05932 
05933   // Next append the message onto the MIOBuffer
05934 
05935   // From HTTP/1.1 RFC:
05936   // "The HEAD method is identical to GET except that the server
05937   // MUST NOT return a message-body in the response. The metainformation
05938   // in the HTTP headers in response to a HEAD request SHOULD be
05939   // identical to the information sent in response to a GET request."
05940   // --> do not append the message onto the MIOBuffer and keep our pointer
05941   // to it so that it can be freed.
05942 
05943   if (is_msg_buf_present && t_state.method != HTTP_WKSIDX_HEAD) {
05944     nbytes += t_state.internal_msg_buffer_size;
05945 
05946     if (t_state.internal_msg_buffer_fast_allocator_size < 0)
05947       buf->append_xmalloced(t_state.internal_msg_buffer, t_state.internal_msg_buffer_size);
05948     else
05949       buf->append_fast_allocated(t_state.internal_msg_buffer,
05950                                  t_state.internal_msg_buffer_size, t_state.internal_msg_buffer_fast_allocator_size);
05951 
05952 
05953     // The IOBufferBlock will xfree the msg buffer when necessary so
05954     //  eliminate our pointer to it
05955     t_state.internal_msg_buffer = NULL;
05956     t_state.internal_msg_buffer_size = 0;
05957   }
05958 
05959 
05960   HTTP_SM_SET_DEFAULT_HANDLER(handler_arg);
05961 
05962   // Setup the tunnel to the client
05963   tunnel.add_producer(HTTP_TUNNEL_STATIC_PRODUCER,
05964                       nbytes, buf_start, (HttpProducerHandler) NULL, HT_STATIC, "internal msg");
05965   tunnel.add_consumer(ua_entry->vc,
05966                       HTTP_TUNNEL_STATIC_PRODUCER, &HttpSM::tunnel_handler_ua, HT_HTTP_CLIENT, "user agent");
05967 
05968   ua_entry->in_tunnel = true;
05969   tunnel.tunnel_run();
05970 }
05971 
05972 // int HttpSM::find_http_resp_buffer_size(int64_t content_length)
05973 //
05974 //   Returns the allocation index for the buffer for
05975 //     a response based on the content length
05976 //
05977 int
05978 HttpSM::find_http_resp_buffer_size(int64_t content_length)
05979 {
05980   int64_t buf_size;
05981   int64_t alloc_index;
05982 
05983   if (content_length == HTTP_UNDEFINED_CL) {
05984     // Try to use our configured default size.  Otherwise fall back
05985     //   to the default response buffer size
05986     alloc_index = (int) t_state.txn_conf->default_buffer_size_index;
05987     if (alloc_index < MIN_CONFIG_BUFFER_SIZE_INDEX || alloc_index > DEFAULT_MAX_BUFFER_SIZE) {
05988       alloc_index = DEFAULT_RESPONSE_BUFFER_SIZE_INDEX;
05989     }
05990   } else {
05991 #ifdef WRITE_AND_TRANSFER
05992     buf_size = HTTP_HEADER_BUFFER_SIZE + content_length - index_to_buffer_size(HTTP_SERVER_RESP_HDR_BUFFER_INDEX);
05993 #else
05994     buf_size = index_to_buffer_size(HTTP_HEADER_BUFFER_SIZE_INDEX) + content_length;
05995 #endif
05996     alloc_index = buffer_size_to_index(buf_size);
05997   }
05998 
05999   return alloc_index;
06000 }
06001 
06002 // int HttpSM::server_transfer_init()
06003 //
06004 //    Moves data from the header buffer into the reply buffer
06005 //      and returns the number of bytes we should use for initiating the
06006 //      tunnel
06007 //
06008 int64_t
06009 HttpSM::server_transfer_init(MIOBuffer * buf, int hdr_size)
06010 {
06011   int64_t nbytes;
06012   int64_t to_copy = INT64_MAX;
06013 
06014   ink_assert(t_state.current.server != NULL); // should have been set up if we're doing a transfer.
06015 
06016   if (server_entry->eos == true) {
06017     // The server has shut down on us already, so the only data
06018     //  we'll get is already in the buffer
06019     nbytes = server_buffer_reader->read_avail() + hdr_size;
06020   } else if (t_state.hdr_info.response_content_length == HTTP_UNDEFINED_CL) {
06021     nbytes = -1;
06022   } else {
06023     //  Set to_copy to the number of bytes we want to write;
06024     //  if the server is sending us a bogus response we have to
06025     //  truncate it, as we've already decided to trust the content
06026     //  length
06027     to_copy = t_state.hdr_info.response_content_length;
06028     nbytes = t_state.hdr_info.response_content_length + hdr_size;
06029   }
06030 
06031   // Next order of business is to copy the remaining data from the
06032   //  header buffer into the new buffer.
06033 
06034   int64_t server_response_pre_read_bytes =
06035 #ifdef WRITE_AND_TRANSFER
06036     /* relinquish the space in server_buffer and let
06037        the tunnel use the trailing space
06038      */
06039     buf->write_and_transfer_left_over_space(server_buffer_reader, to_copy);
06040 #else
06041     buf->write(server_buffer_reader, to_copy);
06042 #endif
06043   server_buffer_reader->consume(server_response_pre_read_bytes);
06044 
06045   //  If we know the length & copied the entire body
06046   //   of the document out of the header buffer, make
06047   //   sure the server isn't screwing us by having sent too
06048   //   much.  If it did, we want to close the server connection.
06049   if (server_response_pre_read_bytes == to_copy && server_buffer_reader->read_avail() > 0) {
06050     t_state.current.server->keep_alive = HTTP_NO_KEEPALIVE;
06051   }
06052 #ifdef LAZY_BUF_ALLOC
06053   // reset the server session buffer
06054   server_session->reset_read_buffer();
06055 #endif
06056   return nbytes;
06057 }
06058 
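      // HttpSM::setup_server_transfer_to_transform()
      //
      //   Tunnels the origin server's response body into a response
      //   transform: the server VC is the producer and the transform VC is
      //   the consumer, dechunking the body first if the server sent it
      //   chunked.
      //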
06059 HttpTunnelProducer *
06060 HttpSM::setup_server_transfer_to_transform()
06061 {
06062   int64_t alloc_index;
06063   int64_t nbytes;
06064 
06065   alloc_index = find_server_buffer_size();
06066   MIOBuffer *buf = new_MIOBuffer(alloc_index);
06067   IOBufferReader *buf_start = buf->alloc_reader();
06068   nbytes = server_transfer_init(buf, 0);
06069 
06070   if (t_state.negative_caching && t_state.hdr_info.server_response.status_get() == HTTP_STATUS_NO_CONTENT) {
06071     int s = sizeof("No Content") - 1;
06072     buf->write("No Content", s);
06073     nbytes += s;
06074   }
06075 
06076   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_response_wait_for_transform_read);
06077 
06078   HttpTunnelProducer *p = tunnel.add_producer(server_entry->vc,
06079                                               nbytes,
06080                                               buf_start,
06081                                               &HttpSM::tunnel_handler_server,
06082                                               HT_HTTP_SERVER,
06083                                               "http server");
06084 
06085   tunnel.add_consumer(transform_info.vc,
06086                       server_entry->vc, &HttpSM::tunnel_handler_transform_write, HT_TRANSFORM, "transform write");
06087 
06088   server_entry->in_tunnel = true;
06089   transform_info.entry->in_tunnel = true;
06090 
06091   if (t_state.current.server->transfer_encoding == HttpTransact::CHUNKED_ENCODING) {
06092     client_response_hdr_bytes = 0;      // fixed by YTS Team, yamsat
06093     tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, TCA_DECHUNK_CONTENT);
06094   }
06095 
06096   return p;
06097 }
06098 
06099 HttpTunnelProducer *
06100 HttpSM::setup_transfer_from_transform()
06101 {
06102   int64_t alloc_index = find_server_buffer_size();
06103 
06104   // TODO change this call to new_empty_MIOBuffer()
06105   MIOBuffer *buf = new_MIOBuffer(alloc_index);
06106   buf->water_mark = (int) t_state.txn_conf->default_buffer_water_mark;
06107   IOBufferReader *buf_start = buf->alloc_reader();
06108 
06109   HttpTunnelConsumer *c = tunnel.get_consumer(transform_info.vc);
06110   ink_assert(c != NULL);
06111   ink_assert(c->vc == transform_info.vc);
06112   ink_assert(c->vc_type == HT_TRANSFORM);
06113 
06114   // Now dump the header into the buffer
06115   ink_assert(t_state.hdr_info.client_response.status_get() != HTTP_STATUS_NOT_MODIFIED);
06116   client_response_hdr_bytes = write_response_header_into_buffer(&t_state.hdr_info.client_response, buf);
06117 
06118   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler);
06119 
06120   HttpTunnelProducer *p = tunnel.add_producer(transform_info.vc,
06121                                               INT64_MAX,
06122                                               buf_start,
06123                                               &HttpSM::tunnel_handler_transform_read,
06124                                               HT_TRANSFORM,
06125                                               "transform read");
06126   tunnel.chain(c, p);
06127 
06128   tunnel.add_consumer(ua_entry->vc, transform_info.vc, &HttpSM::tunnel_handler_ua, HT_HTTP_CLIENT, "user agent");
06129 
06130   transform_info.entry->in_tunnel = true;
06131   ua_entry->in_tunnel = true;
06132 
06133   this->setup_plugin_agents(p);
06134 
06135   if ( t_state.client_info.receive_chunked_response ) {
06136     tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, TCA_CHUNK_CONTENT);
06137     tunnel.set_producer_chunking_size(p, t_state.txn_conf->http_chunking_size);
06138   }
06139 
06140   return p;
06141 }
06142 
06143 
06144 HttpTunnelProducer *
06145 HttpSM::setup_transfer_from_transform_to_cache_only()
06146 {
06147   int64_t alloc_index = find_server_buffer_size();
06148   MIOBuffer *buf = new_MIOBuffer(alloc_index);
06149   IOBufferReader *buf_start = buf->alloc_reader();
06150 
06151   HttpTunnelConsumer *c = tunnel.get_consumer(transform_info.vc);
06152   ink_assert(c != NULL);
06153   ink_assert(c->vc == transform_info.vc);
06154   ink_assert(c->vc_type == HT_TRANSFORM);
06155 
06156   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler);
06157 
06158   HttpTunnelProducer *p = tunnel.add_producer(transform_info.vc,
06159                                               INT64_MAX,
06160                                               buf_start,
06161                                               &HttpSM::tunnel_handler_transform_read,
06162                                               HT_TRANSFORM,
06163                                               "transform read");
06164   tunnel.chain(c, p);
06165 
06166   transform_info.entry->in_tunnel = true;
06167 
06168   ink_assert(t_state.cache_info.transform_action == HttpTransact::CACHE_DO_WRITE);
06169 
06170   perform_transform_cache_write_action();
06171 
06172   return p;
06173 }
06174 
06175 void
06176 HttpSM::setup_server_transfer_to_cache_only()
06177 {
06178   TunnelChunkingAction_t action;
06179   int64_t alloc_index;
06180   int64_t nbytes;
06181 
06182   alloc_index = find_server_buffer_size();
06183   MIOBuffer *buf = new_MIOBuffer(alloc_index);
06184   IOBufferReader *buf_start = buf->alloc_reader();
06185 
06186   action = (t_state.current.server && t_state.current.server->transfer_encoding == HttpTransact::CHUNKED_ENCODING) ?
06187     TCA_DECHUNK_CONTENT : TCA_PASSTHRU_DECHUNKED_CONTENT;
06188 
06189   nbytes = server_transfer_init(buf, 0);
06190 
06191   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler);
06192 
06193   HttpTunnelProducer *p = tunnel.add_producer(server_entry->vc,
06194                                               nbytes,
06195                                               buf_start,
06196                                               &HttpSM::tunnel_handler_server,
06197                                               HT_HTTP_SERVER,
06198                                               "http server");
06199 
06200   tunnel.set_producer_chunking_action(p, 0, action);
06201   tunnel.set_producer_chunking_size(p, t_state.txn_conf->http_chunking_size);
06202 
06203   setup_cache_write_transfer(&cache_sm, server_entry->vc, &t_state.cache_info.object_store, 0, "cache write");
06204 
06205   server_entry->in_tunnel = true;
06206 }
06207 
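      // HttpSM::setup_server_transfer()
      //
      //   Sets up the main server-to-client tunnel: decides how the body
      //   will be (de)chunked, writes the client response header into the
      //   buffer, pre-reads any body bytes already received with the
      //   response header, and adds the server VC as producer with the
      //   user agent as consumer.
      //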
06208 void
06209 HttpSM::setup_server_transfer()
06210 {
06211   DebugSM("http", "Setup Server Transfer");
06212   int64_t alloc_index, hdr_size;
06213   int64_t nbytes;
06214 
06215   alloc_index = find_server_buffer_size();
06216 #ifndef USE_NEW_EMPTY_MIOBUFFER
06217   MIOBuffer *buf = new_MIOBuffer(alloc_index);
06218 #else
06219   MIOBuffer *buf = new_empty_MIOBuffer(alloc_index);
06220   buf->append_block(HTTP_HEADER_BUFFER_SIZE_INDEX);
06221 #endif
06222   buf->water_mark = (int) t_state.txn_conf->default_buffer_water_mark;
06223   IOBufferReader *buf_start = buf->alloc_reader();
06224 
06225   // we need to know if we are going to chunk the response or not
06226   // before we write the response header into the buffer
06227   TunnelChunkingAction_t action;
06228   if (t_state.client_info.receive_chunked_response == false) {
06229     if (t_state.current.server->transfer_encoding == HttpTransact::CHUNKED_ENCODING)
06230       action = TCA_DECHUNK_CONTENT;
06231     else
06232       action = TCA_PASSTHRU_DECHUNKED_CONTENT;
06233   } else {
06234     if (t_state.current.server->transfer_encoding != HttpTransact::CHUNKED_ENCODING)
06235       if (t_state.client_info.http_version == HTTPVersion(0, 9))
06236         action = TCA_PASSTHRU_DECHUNKED_CONTENT; // send as-is
06237       else
06238         action = TCA_CHUNK_CONTENT;
06239     else
06240       action = TCA_PASSTHRU_CHUNKED_CONTENT;
06241   }
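        // A summary of the chunking decision above (derived from the branches
        // just made):
        //   client wants chunked?   server sent chunked?   action
        //   no                      yes                    TCA_DECHUNK_CONTENT
        //   no                      no                     TCA_PASSTHRU_DECHUNKED_CONTENT
        //   yes                     yes                    TCA_PASSTHRU_CHUNKED_CONTENT
        //   yes                     no  (HTTP/0.9 client)  TCA_PASSTHRU_DECHUNKED_CONTENT
        //   yes                     no                     TCA_CHUNK_CONTENT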
06242   if (action == TCA_CHUNK_CONTENT || action == TCA_PASSTHRU_CHUNKED_CONTENT) {  // remove Content-Length
06243     t_state.hdr_info.client_response.field_delete(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH);
06244   }
06245   // Now dump the header into the buffer
06246   ink_assert(t_state.hdr_info.client_response.status_get() != HTTP_STATUS_NOT_MODIFIED);
06247   client_response_hdr_bytes = hdr_size = write_response_header_into_buffer(&t_state.hdr_info.client_response, buf);
06248 
06249   nbytes = server_transfer_init(buf, hdr_size);
06250 
06251   if (t_state.negative_caching && t_state.hdr_info.server_response.status_get() == HTTP_STATUS_NO_CONTENT) {
06252     int s = sizeof("No Content") - 1;
06253     buf->write("No Content", s);
06254     nbytes += s;
06255   }
06256 
06257   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler);
06258 
06259   HttpTunnelProducer *p = tunnel.add_producer(server_entry->vc,
06260                                               nbytes,
06261                                               buf_start,
06262                                               &HttpSM::tunnel_handler_server,
06263                                               HT_HTTP_SERVER,
06264                                               "http server");
06265 
06266   tunnel.add_consumer(ua_entry->vc, server_entry->vc, &HttpSM::tunnel_handler_ua, HT_HTTP_CLIENT, "user agent");
06267 
06268   ua_entry->in_tunnel = true;
06269   server_entry->in_tunnel = true;
06270 
06271   this->setup_plugin_agents(p);
06272 
06273   // If the incoming server response is chunked and the client does not
06274   // expect a chunked response, then dechunk it.  Otherwise, if the
06275   // incoming response is not chunked and the client expects a chunked
06276   // response, then chunk it.
06277   /*
06278      // this block is moved up so that we know if we need to remove
06279      // Content-Length field from response header before writing the
06280      // response header into buffer bz50730
06281      TunnelChunkingAction_t action;
06282      if (t_state.client_info.receive_chunked_response == false) {
06283      if (t_state.current.server->transfer_encoding ==
06284      HttpTransact::CHUNKED_ENCODING)
06285      action = TCA_DECHUNK_CONTENT;
06286      else action = TCA_PASSTHRU_DECHUNKED_CONTENT;
06287      }
06288      else {
06289      if (t_state.current.server->transfer_encoding !=
06290      HttpTransact::CHUNKED_ENCODING)
06291      action = TCA_CHUNK_CONTENT;
06292      else action = TCA_PASSTHRU_CHUNKED_CONTENT;
06293      }
06294    */
06295   tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, action);
06296   tunnel.set_producer_chunking_size(p, t_state.txn_conf->http_chunking_size);
06297 }
06298 
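      // HttpSM::setup_push_transfer_to_cache()
      //
      //   Handles a PUSH request: copies the pushed response body already
      //   read from the user agent into a new buffer, adds the user agent
      //   as producer for the remaining bytes, and sets up the cache write
      //   consumer.  If the client hit EOS without supplying the full body,
      //   the state machine is terminated.
      //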
06299 void
06300 HttpSM::setup_push_transfer_to_cache()
06301 {
06302   int64_t nbytes, alloc_index;
06303 
06304   alloc_index = find_http_resp_buffer_size(t_state.hdr_info.request_content_length);
06305   MIOBuffer *buf = new_MIOBuffer(alloc_index);
06306   IOBufferReader *buf_start = buf->alloc_reader();
06307 
06308   ink_release_assert(t_state.hdr_info.request_content_length != HTTP_UNDEFINED_CL);
06309   nbytes = t_state.hdr_info.request_content_length - pushed_response_hdr_bytes;
06310   ink_release_assert(nbytes >= 0);
06311 
06312   if (ua_entry->eos == true) {
06313     // The ua has shut down on us already, so the only data
06314     //  we'll get is already in the buffer.  Make sure it
06315     //  fulfills the stated length
06316     int64_t avail = ua_buffer_reader->read_avail();
06317 
06318     if (avail < nbytes) {
06319       // Client failed to send the body, it's gone.  Kill the
06320       // state machine
06321       terminate_sm = true;
06322       return;
06323     }
06324   }
06325   // Next order of business is to copy the remaining data from the
06326   //  header buffer into the new buffer.
06327   pushed_response_body_bytes = buf->write(ua_buffer_reader, nbytes);
06328   ua_buffer_reader->consume(pushed_response_body_bytes);
06329   client_request_body_bytes += pushed_response_body_bytes;
06330 
06331   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler_push);
06332 
06333   // TODO: Should we do something with the HttpTunnelProducer* returned?
06334   tunnel.add_producer(ua_entry->vc, nbytes, buf_start, &HttpSM::tunnel_handler_ua_push,
06335                       HT_HTTP_CLIENT, "user_agent");
06336   setup_cache_write_transfer(&cache_sm, ua_entry->vc, &t_state.cache_info.object_store, 0, "cache write");
06337 
06338   ua_entry->in_tunnel = true;
06339 }
06340 
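      // HttpSM::setup_blind_tunnel()
      //
      //   Builds a bi-directional byte tunnel between the user agent and
      //   the origin server: each side is both a producer and a consumer,
      //   optionally preceded by a response header written toward the
      //   client, and the two directions are chained together before the
      //   tunnel is run.
      //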
06341 void
06342 HttpSM::setup_blind_tunnel(bool send_response_hdr)
06343 {
06344   HttpTunnelConsumer *c_ua;
06345   HttpTunnelConsumer *c_os;
06346   HttpTunnelProducer *p_ua;
06347   HttpTunnelProducer *p_os;
06348   MIOBuffer *from_ua_buf = new_MIOBuffer(BUFFER_SIZE_INDEX_32K);
06349   MIOBuffer *to_ua_buf = new_MIOBuffer(BUFFER_SIZE_INDEX_32K);
06350   IOBufferReader *r_from = from_ua_buf->alloc_reader();
06351   IOBufferReader *r_to = to_ua_buf->alloc_reader();
06352 
06353   milestones.server_begin_write = ink_get_hrtime();
06354   if (send_response_hdr) {
06355     client_response_hdr_bytes = write_response_header_into_buffer(&t_state.hdr_info.client_response, to_ua_buf);
06356   } else {
06357     client_response_hdr_bytes = 0;
06358   }
06359 
06360   client_request_body_bytes = 0;
06361   if (ua_raw_buffer_reader != NULL) {
06362     client_request_body_bytes += from_ua_buf->write(ua_raw_buffer_reader, client_request_hdr_bytes);
06363     ua_raw_buffer_reader->dealloc();
06364     ua_raw_buffer_reader = NULL;
06365   }
06366 
06367   // Next order of business is to copy the remaining data from the
06368   //  header buffer into the new buffer
06369   client_request_body_bytes += from_ua_buf->write(ua_buffer_reader);
06370 
06371   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler);
06372 
06373   p_os = tunnel.add_producer(server_entry->vc,
06374                              -1, r_to, &HttpSM::tunnel_handler_ssl_producer, HT_HTTP_SERVER, "http server - tunnel");
06375 
06376   c_ua = tunnel.add_consumer(ua_entry->vc,
06377                              server_entry->vc,
06378                              &HttpSM::tunnel_handler_ssl_consumer, HT_HTTP_CLIENT, "user agent - tunnel");
06379 
06380 
06381   p_ua = tunnel.add_producer(ua_entry->vc,
06382                              -1, r_from, &HttpSM::tunnel_handler_ssl_producer, HT_HTTP_CLIENT, "user agent - tunnel");
06383 
06384   c_os = tunnel.add_consumer(server_entry->vc,
06385                              ua_entry->vc,
06386                              &HttpSM::tunnel_handler_ssl_consumer, HT_HTTP_SERVER, "http server - tunnel");
06387 
06388   // Make the tunnel aware that the entries are bi-directional
06389   tunnel.chain(c_os, p_os);
06390   tunnel.chain(c_ua, p_ua);
06391 
06392   ua_entry->in_tunnel = true;
06393   server_entry->in_tunnel = true;
06394 
06395   tunnel.tunnel_run();
06396 }
06397 
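      // HttpSM::setup_plugin_agents()
      //
      //   For each continuation registered on TS_HTTP_RESPONSE_CLIENT_HOOK,
      //   adds it as an extra consumer of the given producer so plugins
      //   registered there receive the response bytes sent toward the
      //   client.  These consumers are not put in the SM VC table; the
      //   tunnel closes them in do_io_close().
      //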
06398 void
06399 HttpSM::setup_plugin_agents(HttpTunnelProducer* p)
06400 {
06401   APIHook* agent = txn_hook_get(TS_HTTP_RESPONSE_CLIENT_HOOK);
06402   has_active_plugin_agents = agent != 0;
06403   while (agent) {
06404     INKVConnInternal* contp = static_cast<INKVConnInternal*>(agent->m_cont);
06405     tunnel.add_consumer(contp, p->vc, &HttpSM::tunnel_handler_plugin_agent, HT_HTTP_CLIENT, "plugin agent");
06406     // We don't put these in the SM VC table because the tunnel
06407     // will clean them up in do_io_close().
06408     agent = agent->next();
06409   }
06410 }
06411 
06412 inline void
06413 HttpSM::transform_cleanup(TSHttpHookID hook, HttpTransformInfo * info)
06414 {
06415   APIHook *t_hook = api_hooks.get(hook);
06416   if (t_hook && info->vc == NULL) {
06417     do {
06418       VConnection *t_vcon = t_hook->m_cont;
06419       t_vcon->do_io_close();
06420       t_hook = t_hook->m_link.next;
06421     } while (t_hook != NULL);
06422   }
06423 }
06424 
06425 void
06426 HttpSM::plugin_agents_cleanup()
06427 {
06428   // If this is set then all of the plugin agent VCs were put in
06429   // the VC table and cleaned up there. This handles the case where
06430   // something went wrong early.
06431   if (!has_active_plugin_agents) {
06432     APIHook* agent = txn_hook_get(TS_HTTP_RESPONSE_CLIENT_HOOK);
06433     while (agent) {
06434       INKVConnInternal* contp = static_cast<INKVConnInternal*>(agent->m_cont);
06435       contp->do_io_close();
06436       agent = agent->next();
06437     }
06438   }
06439 }
06440 
06441 //////////////////////////////////////////////////////////////////////////
06442 //
06443 //  HttpSM::kill_this()
06444 //
06445 //  This function has two phases.  One before we call the asynchronous
06446 //    clean up routines (api and list removal) and one after.
06447 //    The state about which phase we are in is kept in
06448 //    HttpSM::kill_this_async_done
06449 //
06450 //////////////////////////////////////////////////////////////////////////
06451 void
06452 HttpSM::kill_this()
06453 {
06454   ink_release_assert(reentrancy_count == 1);
06455   tunnel.deallocate_redirect_postdata_buffers();
06456   enable_redirection = false;
06457 
06458   if (kill_this_async_done == false) {
06459     ////////////////////////////////
06460     // cancel uncompleted actions //
06461     ////////////////////////////////
06462     // The action should be cancelled only if
06463     // the state machine is in HTTP_API_NO_CALLOUT
06464     // state. This is because we are depending on the
06465     // callout to complete for the state machine to
06466     // get killed.
06467     if (callout_state == HTTP_API_NO_CALLOUT && pending_action) {
06468       pending_action->cancel();
06469       pending_action = NULL;
06470     }
06471 
06472     cache_sm.end_both();
06473     if (second_cache_sm)
06474       second_cache_sm->end_both();
06475     transform_cache_sm.end_both();
06476     vc_table.cleanup_all();
06477     tunnel.deallocate_buffers();
06478 
06479     // It is possible that a plugin added a transform hook
06480     //   but the hook never executed due to a client abort.
06481     //   In that case, we need to manually close all the
06482     //   transforms to prevent memory leaks (INKqa06147)
06483     if (hooks_set) {
06484       transform_cleanup(TS_HTTP_RESPONSE_TRANSFORM_HOOK, &transform_info);
06485       transform_cleanup(TS_HTTP_REQUEST_TRANSFORM_HOOK, &post_transform_info);
06486       plugin_agents_cleanup();
06487     }
06488     // It's also possible that the plugin_tunnel vc was never
06489     //   executed due to not contacting the server
06490     if (plugin_tunnel) {
06491       plugin_tunnel->kill_no_connect();
06492       plugin_tunnel = NULL;
06493     }
06494 
06495     ua_session = NULL;
06496     server_session = NULL;
06497 
06498     // So we don't try to nuke the state machine
06499     //  if the plugin receives event we must reset
06500     //  the terminate_flag
06501     terminate_sm = false;
06502     t_state.api_next_action = HttpTransact::SM_ACTION_API_SM_SHUTDOWN;
06503     do_api_callout();
06504   }
06505   // The reentrancy_count is still valid up to this point since
06506   //   the api shutdown hook is asynchronous, and double frees can
06507   //   happen if the reentrancy count is not kept valid until
06508   //   after all async callouts have completed.
06509   //
06510   // Once we get to this point, we could be waiting for async
06511   //   completion, in which case we need to decrement the reentrancy
06512   //   count since the entry points can't do it for us because they
06513   //   don't know if the state machine has been destroyed.  In the
06514   //   case where we really are done with async callouts, decrement the
06515   //   reentrancy count since it seems tacky to destruct a state
06516   //   machine with a non-zero count.
06517   reentrancy_count--;
06518   ink_release_assert(reentrancy_count == 0);
06519 
06520   // If the api shutdown & list removal was synchronous
06521   //   then the value of kill_this_async_done has changed so
06522   //   we must check it again
06523   if (kill_this_async_done == true) {
06524     // In the async state, the plugin could have been
06525     // called resulting in the creation of a plugin_tunnel.
06526     // So it needs to be deleted now.
06527     if (plugin_tunnel) {
06528       plugin_tunnel->kill_no_connect();
06529       plugin_tunnel = NULL;
06530     }
06531 
06532     if (t_state.pCongestionEntry != NULL) {
06533       if (t_state.congestion_congested_or_failed != 1) {
06534         t_state.pCongestionEntry->go_alive();
06535       }
06536     }
06537 
06538     ink_assert(pending_action == NULL);
06539     ink_release_assert(vc_table.is_table_clear() == true);
06540     ink_release_assert(tunnel.is_tunnel_active() == false);
06541 
06542     if (t_state.http_config_param->enable_http_stats)
06543       update_stats();
06544 
06545     HTTP_SM_SET_DEFAULT_HANDLER(NULL);
06546 
06547     if (redirect_url != NULL) {
06548       ats_free((void*)redirect_url);
06549       redirect_url = NULL;
06550       redirect_url_len = 0;
06551     }
06552 
06553 #ifdef USE_HTTP_DEBUG_LISTS
06554     ink_mutex_acquire(&debug_sm_list_mutex);
06555     debug_sm_list.remove(this, this->debug_link);
06556     ink_mutex_release(&debug_sm_list_mutex);
06557 #endif
06558 
06559     DebugSM("http", "[%" PRId64 "] deallocating sm", sm_id);
06560 //    authAdapter.destroyState();
06561     destroy();
06562   }
06563 }
06564 
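      // HttpSM::update_stats()
      //
      //   Called as the state machine shuts down: records the sm_finish
      //   milestone, logs the transaction, updates transaction size and
      //   time statistics, and emits a "Slow Request" error line when the
      //   configured slow_log_threshold is exceeded.
      //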
06565 void
06566 HttpSM::update_stats()
06567 {
06568   milestones.sm_finish = ink_get_hrtime();
06569 
06570   if (t_state.cop_test_page && !t_state.http_config_param->record_cop_page) {
06571     DebugSM("http_seq", "Skipping cop heartbeat logging & stats due to config");
06572     return;
06573   }
06574 
06575   //////////////
06576   // Log Data //
06577   //////////////
06578   DebugSM("http_seq", "[HttpSM::update_stats] Logging transaction");
06579   if (Log::transaction_logging_enabled() && t_state.api_info.logging_enabled) {
06580     LogAccessHttp accessor(this);
06581 
06582     int ret = Log::access(&accessor);
06583 
06584     if (ret & Log::FULL) {
06585       DebugSM("http", "[update_stats] Logging system indicates FULL.");
06586     }
06587     if (ret & Log::FAIL) {
06588       Log::error("failed to log transaction for at least one log object");
06589     }
06590   }
06591 
06592   if (is_action_tag_set("bad_length_state_dump")) {
06593     if (t_state.hdr_info.client_response.valid() && t_state.hdr_info.client_response.status_get() == HTTP_STATUS_OK) {
06594       int64_t p_resp_cl = t_state.hdr_info.client_response.get_content_length();
06595       int64_t resp_size = client_response_body_bytes;
06596       if (!((p_resp_cl == -1 || p_resp_cl == resp_size || resp_size == 0))) {
06597         Error("[%" PRId64 "] Truncated content detected", sm_id);
06598         dump_state_on_assert();
06599       }
06600     } else if (client_request_hdr_bytes == 0) {
06601       Error("[%" PRId64 "] Zero length request header received", sm_id);
06602       dump_state_on_assert();
06603     }
06604   }
06605 
06606   if (is_action_tag_set("assert_jtest_length")) {
06607     if (t_state.hdr_info.client_response.valid() && t_state.hdr_info.client_response.status_get() == HTTP_STATUS_OK) {
06608       int64_t p_resp_cl = t_state.hdr_info.client_response.get_content_length();
06609       int64_t resp_size = client_response_body_bytes;
06610       ink_release_assert(p_resp_cl == -1 || p_resp_cl == resp_size || resp_size == 0);
06611     }
06612   }
06613 
06614   ink_hrtime total_time = milestones.sm_finish - milestones.sm_start;
06615 
06616   // ua_close will not be assigned properly in some exceptional situations.
06617   // TODO: Assign ua_close a suitable value when the HttpTunnel terminates abnormally.
06618   if (milestones.ua_close == 0 && milestones.ua_read_header_done > 0)
06619     milestones.ua_close = ink_get_hrtime();
06620 
06621   // request_process_time  = The time after the header is parsed to the completion of the transaction
06622   ink_hrtime request_process_time = milestones.ua_close - milestones.ua_read_header_done;
06623 
06624   HttpTransact::client_result_stat(&t_state, total_time, request_process_time);
06625 
06626   ink_hrtime ua_write_time;
06627   if (milestones.ua_begin_write != 0 && milestones.ua_close != 0) {
06628     ua_write_time = milestones.ua_close - milestones.ua_begin_write;
06629   } else {
06630     ua_write_time = -1;
06631   }
06632 
06633   ink_hrtime os_read_time;
06634   if (milestones.server_read_header_done != 0 && milestones.server_close != 0) {
06635     os_read_time = milestones.server_close - milestones.server_read_header_done;
06636   } else {
06637     os_read_time = -1;
06638   }
06639 
06640   // TS-2032: This code is never used, but leaving it here in case we want to add these
06641   // to the metrics code.
06642 #if 0
06643   ink_hrtime cache_lookup_time;
06644   if (milestones.cache_open_read_end != 0 && milestones.cache_open_read_begin != 0) {
06645     cache_lookup_time = milestones.cache_open_read_end - milestones.cache_open_read_begin;
06646   } else {
06647     cache_lookup_time = -1;
06648   }
06649 #endif
06650 
06651   HttpTransact::update_size_and_time_stats(&t_state, total_time, ua_write_time, os_read_time, client_request_hdr_bytes,
06652                                            client_request_body_bytes, client_response_hdr_bytes, client_response_body_bytes,
06653                                            server_request_hdr_bytes, server_request_body_bytes, server_response_hdr_bytes,
06654                                            server_response_body_bytes, pushed_response_hdr_bytes, pushed_response_body_bytes);
06655 /*
06656     if (is_action_tag_set("http_handler_times")) {
06657         print_all_http_handler_times();
06658     }
06659     */
06660 
06661 
06662   // print slow requests if the threshold is set (> 0) and if we are over the time threshold
06663   if (t_state.http_config_param->slow_log_threshold != 0 &&
06664       ink_hrtime_from_msec(t_state.http_config_param->slow_log_threshold) < total_time) {
06665     URL* url = t_state.hdr_info.client_request.url_get();
06666     char url_string[256] = "";
06667     int offset = 0;
06668     int skip = 0;
06669 
06670     t_state.hdr_info.client_request.url_print(url_string, sizeof url_string, &offset, &skip);
06671 
06672     // unique id
06673     char unique_id_string[128] = "";
06674     // [amc] why do we check the URL to get a MIME field?
06675     if (0 != url && url->valid()) {
06676       int length = 0;
06677       const char *field = t_state.hdr_info.client_request.value_get(MIME_FIELD_X_ID, MIME_LEN_X_ID, &length);
06678       if (field != NULL) {
06679         ink_strlcpy(unique_id_string, field, sizeof(unique_id_string));
06680       }
06681     }
06682 
06683     // set the fd for the request
06684     int fd = 0;
06685     NetVConnection *vc = NULL;
06686     if (ua_session != NULL) {
06687       vc = ua_session->get_netvc();
06688       if (vc != NULL) {
06689         fd = vc->get_socket();
06690       } else {
06691         fd = -1;
06692       }
06693     }
06694     // get the status code, lame that we have to check to see if it is valid or we will assert in the method call
06695     int status = 0;
06696     if (t_state.hdr_info.client_response.valid()) {
06697       status = t_state.hdr_info.client_response.status_get();
06698     }
06699     char client_ip[INET6_ADDRSTRLEN];
06700     ats_ip_ntop(&t_state.client_info.addr, client_ip, sizeof(client_ip));
06701     Error("[%" PRId64 "] Slow Request: "
06702           "client_ip: %s:%u "
06703           "url: %s "
06704           "status: %d "
06705           "unique id: %s "
06706           "bytes: %" PRId64 " "
06707           "fd: %d "
06708           "client state: %d "
06709           "server state: %d "
06710           "ua_begin: %.3f "
06711           "ua_read_header_done: %.3f "
06712           "cache_open_read_begin: %.3f "
06713           "cache_open_read_end: %.3f "
06714           "dns_lookup_begin: %.3f "
06715           "dns_lookup_end: %.3f "
06716           "server_connect: %.3f "
06717           "server_first_read: %.3f "
06718           "server_read_header_done: %.3f "
06719           "server_close: %.3f "
06720           "ua_close: %.3f "
06721           "sm_finish: %.3f",
06722           sm_id,
06723           client_ip,
06724           ats_ip_port_host_order(&t_state.client_info.addr),
06725           url_string,
06726           status,
06727           unique_id_string,
06728           client_response_body_bytes,
06729           fd,
06730           t_state.client_info.state,
06731           t_state.server_info.state,
06732           milestone_difference(milestones.sm_start, milestones.ua_begin),
06733           milestone_difference(milestones.sm_start, milestones.ua_read_header_done),
06734           milestone_difference(milestones.sm_start, milestones.cache_open_read_begin),
06735           milestone_difference(milestones.sm_start, milestones.cache_open_read_end),
06736           milestone_difference(milestones.sm_start, milestones.dns_lookup_begin),
06737           milestone_difference(milestones.sm_start, milestones.dns_lookup_end),
06738           milestone_difference(milestones.sm_start, milestones.server_connect),
06739           milestone_difference(milestones.sm_start, milestones.server_first_read),
06740           milestone_difference(milestones.sm_start, milestones.server_read_header_done),
06741           milestone_difference(milestones.sm_start, milestones.server_close),
06742           milestone_difference(milestones.sm_start, milestones.ua_close),
06743           milestone_difference(milestones.sm_start, milestones.sm_finish)
06744       );
06745 
06746   }
06747 }
06748 
06749 
06750 //
06751 // void HttpSM::dump_state_on_assert
06752 //    Debugging routine to dump the state machine's history
06753 //     and other state on an assertion failure
06754 //    We use Diags::Status instead of stderr since
06755 //     Diags works both on UNIX & NT
06756 //
06757 void
06758 HttpSM::dump_state_on_assert()
06759 {
06760   Error("[%" PRId64 "] ------- begin http state dump -------", sm_id);
06761 
06762   int hist_size = this->history_pos;
06763   if (this->history_pos > HISTORY_SIZE) {
06764     hist_size = HISTORY_SIZE;
06765     Error("   History Wrap around. history_pos: %d", this->history_pos);
06766   }
06767   // Loop through the history and dump it
06768   for (int i = 0; i < hist_size; i++) {
06769     int r = history[i].reentrancy;
06770     int e = history[i].event;
06771     Error("%d   %d   %s", e, r, history[i].fileline);
06772   }
06773 
06774   // Dump the via string
06775   Error("Via String: [%s]\n", t_state.via_string);
06776 
06777   // Dump header info
06778   dump_state_hdr(&t_state.hdr_info.client_request, "Client Request");
06779   dump_state_hdr(&t_state.hdr_info.server_request, "Server Request");
06780   dump_state_hdr(&t_state.hdr_info.server_response, "Server Response");
06781   dump_state_hdr(&t_state.hdr_info.transform_response, "Transform Response");
06782   dump_state_hdr(&t_state.hdr_info.client_response, "Client Response");
06783 
06784   Error("[%" PRId64 "] ------- end http state dump ---------", sm_id);
06785 }
06786 
06787 void
06788 HttpSM::dump_state_hdr(HTTPHdr *h, const char *s)
06789 {
06790   // Dump the client request if available
06791   if (h->valid()) {
06792     int l = h->length_get();
06793     char *hdr_buf = (char *)ats_malloc(l + 1);
06794     int index = 0;
06795     int offset = 0;
06796 
06797     h->print(hdr_buf, l, &index, &offset);
06798 
06799     hdr_buf[l] = '\0';
06800     Error("  ----  %s [%" PRId64 "] ----\n%s\n", s, sm_id, hdr_buf);
06801     ats_free(hdr_buf);
06802   }
06803 }
06804 
06805 
06806 
06807 /*****************************************************************************
06808  *****************************************************************************
06809  ****                                                                     ****
06810  ****                       HttpTransact Interface                        ****
06811  ****                                                                     ****
06812  *****************************************************************************
06813  *****************************************************************************/
06814 //////////////////////////////////////////////////////////////////////////
06815 //
06816 //      HttpSM::call_transact_and_set_next_state(f)
06817 //
06818 //      This routine takes an HttpTransact function <f>, calls the function
06819 //      to perform some actions on the current HttpTransact::State, and
06820 //      then uses the HttpTransact return action code to set the next
06821 //      handler (state) for the state machine.  HttpTransact could have
06822 //      returned the handler directly, but returns action codes in hopes of
06823 //      making a cleaner separation between the state machine and the
06824 //      HttpTransact logic.
06825 //
06826 //////////////////////////////////////////////////////////////////////////
06827 
06828 // Where is the goatherd?
06829 
06830 void
06831 HttpSM::call_transact_and_set_next_state(TransactEntryFunc_t f)
06832 {
06833   last_action = t_state.next_action;    // remember where we were
06834 
06835   // The callee can either specify a method to call in to Transact,
06836   //   or call with NULL which indicates that Transact should use
06837   //   its stored entry point.
06838   if (f == NULL) {
06839     ink_release_assert(t_state.transact_return_point != NULL);
06840     t_state.transact_return_point(&t_state);
06841   } else {
06842     f(&t_state);
06843   }
06844 
06845   DebugSM("http", "[%" PRId64 "] State Transition: %s -> %s",
06846         sm_id, HttpDebugNames::get_action_name(last_action), HttpDebugNames::get_action_name(t_state.next_action));
06847 
06848   set_next_state();
06849 
06850   return;
06851 }
06852 
06853 //////////////////////////////////////////////////////////////////////////////
06854 //
06855 //  HttpSM::set_next_state()
06856 //
06857 //  call_transact_and_set_next_state() was broken into two parts, one
06858 //  which calls the HttpTransact method and the second which sets the
06859 //  next state. In cases where set_next_state() was not completed,
06860 //  the state function calls set_next_state() to retry setting the
06861 //  state.
06862 //
06863 //////////////////////////////////////////////////////////////////////////////
06864 void
06865 HttpSM::set_next_state()
06866 {
06867   ///////////////////////////////////////////////////////////////////////
06868   // Use the returned "next action" code to set the next state handler //
06869   ///////////////////////////////////////////////////////////////////////
06870   switch (t_state.next_action) {
06871   case HttpTransact::SM_ACTION_API_READ_REQUEST_HDR:
06872   case HttpTransact::SM_ACTION_API_PRE_REMAP:
06873   case HttpTransact::SM_ACTION_API_POST_REMAP:
06874   case HttpTransact::SM_ACTION_API_OS_DNS:
06875   case HttpTransact::SM_ACTION_API_SEND_REQUEST_HDR:
06876   case HttpTransact::SM_ACTION_API_READ_CACHE_HDR:
06877   case HttpTransact::SM_ACTION_API_READ_RESPONSE_HDR:
06878   case HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR:
06879   case HttpTransact::SM_ACTION_API_CACHE_LOOKUP_COMPLETE:
06880     {
06881       t_state.api_next_action = t_state.next_action;
06882       do_api_callout();
06883       break;
06884     }
06885 
06886   case HttpTransact::SM_ACTION_POST_REMAP_SKIP:
06887     {
06888       call_transact_and_set_next_state(NULL);
06889       break;
06890     }
06891 
06892   case HttpTransact::SM_ACTION_REMAP_REQUEST:
06893     {
06894       if (!remapProcessor.using_separate_thread()) {
06895         do_remap_request(true); /* run inline */
06896         DebugSM("url_rewrite", "completed inline remapping request for [%" PRId64 "]", sm_id);
06897         t_state.url_remap_success = remapProcessor.finish_remap(&t_state);
06898         call_transact_and_set_next_state(NULL);
06899       } else {
06900         HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_remap_request);
06901         do_remap_request(false);        /* dont run inline (iow on another thread) */
06902       }
06903       break;
06904     }
06905 
06906   case HttpTransact::SM_ACTION_DNS_LOOKUP:
06907     {
06908       sockaddr const* addr;
06909 
06910       if (t_state.api_server_addr_set) {
06911         /* If the API has set the server address before the OS DNS lookup
06912          * then we can skip the lookup
06913          */
06914         ip_text_buffer ipb;
06915         DebugSM("dns", "[HttpTransact::HandleRequest] Skipping DNS lookup for API supplied target %s.\n", ats_ip_ntop(&t_state.server_info.addr, ipb, sizeof(ipb)));
06916         // this seems wasteful as we will just copy it right back
06917         ats_ip_copy(t_state.host_db_info.ip(), &t_state.server_info.addr);
06918         t_state.dns_info.lookup_success = true;
06919         call_transact_and_set_next_state(NULL);
06920         break;
06921       } else if (url_remap_mode == HttpTransact::URL_REMAP_FOR_OS && t_state.first_dns_lookup) {
06922         DebugSM("cdn", "Skipping DNS Lookup");
06923         // skip the DNS lookup
06924         t_state.first_dns_lookup = false;
06925         call_transact_and_set_next_state(HttpTransact::HandleFiltering);
06926         break;
06927       } else if (t_state.http_config_param->use_client_target_addr == 2
06928         && !t_state.url_remap_success
06929         && t_state.parent_result.r != PARENT_SPECIFIED
06930         && t_state.client_info.is_transparent
06931         && t_state.dns_info.os_addr_style == HttpTransact::DNSLookupInfo::OS_ADDR_TRY_DEFAULT
06932         && ats_is_ip(addr = t_state.state_machine->ua_session->get_netvc()->get_local_addr())
06933       ) {
06934         /* If the connection is client side transparent and the URL
06935          * was not remapped/directed to parent proxy, we can use the
06936          * client destination IP address instead of doing a DNS
06937          * lookup. This is controlled by the 'use_client_target_addr'
06938          * configuration parameter.
06939          */
06940         if (is_debug_tag_set("dns")) {
06941           ip_text_buffer ipb;
06942           DebugSM("dns", "[HttpTransact::HandleRequest] Skipping DNS lookup for client supplied target %s.\n", ats_ip_ntop(addr, ipb, sizeof(ipb)));
06943         }
06944         ats_ip_copy(t_state.host_db_info.ip(), addr);
06945         if (t_state.hdr_info.client_request.version_get() == HTTPVersion(0, 9))
06946           t_state.host_db_info.app.http_data.http_version =  HostDBApplicationInfo::HTTP_VERSION_09;
06947         else if (t_state.hdr_info.client_request.version_get() == HTTPVersion(1, 0))
06948           t_state.host_db_info.app.http_data.http_version =  HostDBApplicationInfo::HTTP_VERSION_10;
06949         else
06950           t_state.host_db_info.app.http_data.http_version =  HostDBApplicationInfo::HTTP_VERSION_11;
06951 
06952         t_state.dns_info.lookup_success = true;
06953         // cache this result so we don't have to unreliably duplicate the
06954         // logic later if the connect fails.
06955         t_state.dns_info.os_addr_style = HttpTransact::DNSLookupInfo::OS_ADDR_TRY_CLIENT;
06956         call_transact_and_set_next_state(NULL);
06957         break;
06958       } else if (t_state.parent_result.r == PARENT_UNDEFINED && t_state.dns_info.lookup_success) {
06959         // Already set, and we don't have a parent proxy to lookup
06960         ink_assert(ats_is_ip(t_state.host_db_info.ip()));
06961         DebugSM("dns", "[HttpTransact::HandleRequest] Skipping DNS lookup, provided by plugin");
06962         call_transact_and_set_next_state(NULL);
06963         break;
06964       } else if (t_state.dns_info.looking_up == HttpTransact::ORIGIN_SERVER &&
06965                  t_state.http_config_param->no_dns_forward_to_parent){
06966 
06967         if (t_state.cop_test_page)
06968           ats_ip_copy(t_state.host_db_info.ip(), t_state.state_machine->ua_session->get_netvc()->get_local_addr());
06969 
06970         t_state.dns_info.lookup_success = true;
06971         call_transact_and_set_next_state(NULL);
06972         break;
06973       }
06974 
06975       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_hostdb_lookup);
06976 
06977       ink_assert(t_state.dns_info.looking_up != HttpTransact::UNDEFINED_LOOKUP);
06978       do_hostdb_lookup();
06979       break;
06980     }
06981 
06982   case HttpTransact::SM_ACTION_DNS_REVERSE_LOOKUP:
06983     {
06984       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_hostdb_reverse_lookup);
06985       do_hostdb_reverse_lookup();
06986       break;
06987     }
06988 
06989   case HttpTransact::SM_ACTION_CACHE_LOOKUP:
06990     {
06991       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_cache_open_read);
06992       do_cache_lookup_and_read();
06993       break;
06994     }
06995 
06996   case HttpTransact::SM_ACTION_ORIGIN_SERVER_OPEN:
06997     {
06998       if (congestionControlEnabled && (t_state.congest_saved_next_action == HttpTransact::SM_ACTION_UNDEFINED)) {
06999         t_state.congest_saved_next_action = HttpTransact::SM_ACTION_ORIGIN_SERVER_OPEN;
07000         HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_congestion_control_lookup);
07001         if (!do_congestion_control_lookup())
07002           break;
07003       }
07004       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_http_server_open);
07005 
07006       // We need to close the previous attempt
07007       if (server_entry) {
07008         ink_assert(server_entry->vc_type == HTTP_SERVER_VC);
07009         vc_table.cleanup_entry(server_entry);
07010         server_entry = NULL;
07011         server_session = NULL;
07012       } else {
07013         // Now that we have gotten the user agent request, we can cancel
07014         // the inactivity timeout associated with it.  Note, however, that
07015         // we must not cancel the inactivity timeout if the message
07016         // contains a body (as indicated by the non-zero request_content_length
07017         // field).  This indicates that a POST operation is taking place and
07018         // that the client is still sending data to the origin server.  The
07019         // origin server cannot reply until the entire request is received.  In
07020         // light of this dependency, TS must ensure that the client finishes
07021         // sending its request and for this reason, the inactivity timeout
07022         // cannot be cancelled.
07023         if (ua_session && !t_state.hdr_info.request_content_length) {
07024           ua_session->get_netvc()->cancel_inactivity_timeout();
07025         }
07026       }
07027 
07028       do_http_server_open();
07029       break;
07030     }
07031 
07032   case HttpTransact::SM_ACTION_SERVER_PARSE_NEXT_HDR:
07033     {
07034       setup_server_read_response_header();
07035       break;
07036     }
07037 
07038   case HttpTransact::SM_ACTION_INTERNAL_100_RESPONSE:
07039     {
07040       setup_100_continue_transfer();
07041       break;
07042     }
07043 
07044   case HttpTransact::SM_ACTION_SERVER_READ:
07045     {
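      // Two paths out of here: if a transform is active, hook the origin
      // server up as a producer feeding the transform and start the tunnel
      // immediately; otherwise run the SEND_RESPONSE_HDR hooks (or take the
      // redirect path) before the response is returned to the client.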
07046       t_state.source = HttpTransact::SOURCE_HTTP_ORIGIN_SERVER;
07047 
07048       if (transform_info.vc) {
07049         ink_assert(t_state.hdr_info.client_response.valid() == 0);
07050         ink_assert((t_state.hdr_info.transform_response.valid()? true : false) == true);
07051         HttpTunnelProducer *p = setup_server_transfer_to_transform();
07052         perform_cache_write_action();
07053         tunnel.tunnel_run(p);
07054       } else {
07055         ink_assert((t_state.hdr_info.client_response.valid()? true : false) == true);
07056         t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
07057 
07058         // check to see if we are going to handle the redirection from server response and if there is a plugin hook set
07059         if (hooks_set && is_redirect_required() == false) {
07060           do_api_callout_internal();
07061         } else {
07062           do_redirect();
07063           handle_api_return();
07064         }
07065 
07066       }
07067       break;
07068     }
07069 
07070   case HttpTransact::SM_ACTION_SERVE_FROM_CACHE:
07071     {
07072       ink_assert(t_state.cache_info.action == HttpTransact::CACHE_DO_SERVE ||
07073                  t_state.cache_info.action == HttpTransact::CACHE_DO_SERVE_AND_DELETE ||
07074                  t_state.cache_info.action == HttpTransact::CACHE_DO_SERVE_AND_UPDATE);
07075       release_server_session(true);
07076       t_state.source = HttpTransact::SOURCE_CACHE;
07077 
07078       if (transform_info.vc) {
07079         ink_assert(t_state.hdr_info.client_response.valid() == 0);
07080         ink_assert((t_state.hdr_info.transform_response.valid()? true : false) == true);
07081         t_state.hdr_info.cache_response.create(HTTP_TYPE_RESPONSE);
07082         t_state.hdr_info.cache_response.copy(&t_state.hdr_info.transform_response);
07083 
07084         HttpTunnelProducer *p = setup_cache_transfer_to_transform();
07085         perform_cache_write_action();
07086         tunnel.tunnel_run(p);
07087       } else {
07088         ink_assert((t_state.hdr_info.client_response.valid()? true : false) == true);
07089         t_state.hdr_info.cache_response.create(HTTP_TYPE_RESPONSE);
07090         t_state.hdr_info.cache_response.copy(&t_state.hdr_info.client_response);
07091 
07092         perform_cache_write_action();
07093         t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
07094 
07095         // check to see if we are going to handle the redirection from server response and if there is a plugin hook set
07096         if (hooks_set && is_redirect_required() == false) {
07097           do_api_callout_internal();
07098         } else {
07099           do_redirect();
07100           handle_api_return();
07101         }
07102       }
07103       break;
07104     }
07105 
07106   case HttpTransact::SM_ACTION_CACHE_ISSUE_WRITE:
07107     {
07108       ink_assert(cache_sm.cache_write_vc == NULL);
07109       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_cache_open_write);
07110 
07111       do_cache_prepare_write();
07112       break;
07113 
07114     }
07115 
07116   case HttpTransact::SM_ACTION_INTERNAL_CACHE_WRITE:
07117     {
07118       t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
07119       do_api_callout();
07120       break;
07121     }
07122 
07123   case HttpTransact::SM_ACTION_INTERNAL_CACHE_NOOP:
07124     {
07125       if (server_entry == NULL || server_entry->in_tunnel == false) {
07126         release_server_session();
07127       }
07128       // If we're in state SEND_API_RESPONSE_HDR, it means functions
07129       // registered to hook SEND_RESPONSE_HDR have already been called. So we do not
07130       // need to call do_api_callout. Otherwise TS loops infinitely in this state!
07131       if (t_state.api_next_action == HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR) {
07132         handle_api_return();
07133       } else {
07134         t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
07135         do_api_callout();
07136       }
07137       break;
07138     }
07139 
07140   case HttpTransact::SM_ACTION_INTERNAL_CACHE_DELETE:
07141     {
07142       // Nuke all the alternates since this is most likely
07143       //   the result of a DELETE method
07144       cache_sm.end_both();
07145       do_cache_delete_all_alts(NULL);
07146 
07147       release_server_session();
07148       t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
07149       do_api_callout();
07150       break;
07151     }
07152 
07153   case HttpTransact::SM_ACTION_INTERNAL_CACHE_UPDATE_HEADERS:
07154     {
07155       issue_cache_update();
07156       cache_sm.close_read();
07157 
07158       release_server_session();
07159       t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
07160       do_api_callout();
07161       break;
07162 
07163     }
07164 
07165   case HttpTransact::SM_ACTION_SEND_ERROR_CACHE_NOOP:
07166     {
07167       setup_error_transfer();
07168       break;
07169     }
07170 
07171 
07172   case HttpTransact::SM_ACTION_INTERNAL_REQUEST:
07173     {
07174       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_handle_stat_page);
07175       Action *action_handle = statPagesManager.handle_http(this, &t_state.hdr_info.client_request);
07176 
07177       if (action_handle != ACTION_RESULT_DONE) {
07178         pending_action = action_handle;
07179         historical_action = pending_action;
07180       }
07181 
07182       break;
07183     }
07184 
07185   case HttpTransact::SM_ACTION_ORIGIN_SERVER_RR_MARK_DOWN:
07186     {
07187       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_mark_os_down);
07188 
07189       ink_assert(t_state.dns_info.looking_up == HttpTransact::ORIGIN_SERVER);
07190 
07191       // TODO: This might not be optimal (or perhaps even correct), but it will 
07192       // effectively mark the host as down. What's odd is that state_mark_os_down
07193       // above isn't triggering.
07194       HttpSM::do_hostdb_update_if_necessary();
07195 
07196       do_hostdb_lookup();
07197       break;
07198     }
07199 
07200   case HttpTransact::SM_ACTION_SSL_TUNNEL:
07201     {
07202       setup_blind_tunnel(true);
07203       break;
07204     }
07205 
07206   case HttpTransact::SM_ACTION_ORIGIN_SERVER_RAW_OPEN:{
07207       if (congestionControlEnabled && (t_state.congest_saved_next_action == HttpTransact::SM_ACTION_UNDEFINED)) {
07208         t_state.congest_saved_next_action = HttpTransact::SM_ACTION_ORIGIN_SERVER_RAW_OPEN;
07209         HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_congestion_control_lookup);
07210         if (!do_congestion_control_lookup())
07211           break;
07212       }
07213 
07214       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_raw_http_server_open);
07215 
07216       ink_assert(server_entry == NULL);
07217       do_http_server_open(true);
07218       break;
07219     }
07220 
07221   case HttpTransact::SM_ACTION_ICP_QUERY:
07222     {
07223       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_icp_lookup);
07224       do_icp_lookup();
07225       break;
07226     }
07227 
07228   case HttpTransact::SM_ACTION_CACHE_ISSUE_WRITE_TRANSFORM:
07229     {
07230       ink_assert(t_state.cache_info.transform_action == HttpTransact::CACHE_PREPARE_TO_WRITE);
07231 
07232       if (transform_cache_sm.cache_write_vc) {
07233         // We've already got the write_vc that
07234         //  we didn't use for the untransformed copy
07235         ink_assert(cache_sm.cache_write_vc == NULL);
07236         ink_assert(t_state.api_info.cache_untransformed == false);
07237         t_state.cache_info.write_lock_state = HttpTransact::CACHE_WL_SUCCESS;
07238         call_transact_and_set_next_state(NULL);
07239       } else {
07240         HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_cache_open_write);
07241 
07242         do_cache_prepare_write_transform();
07243       }
07244       break;
07245     }
07246 
07247   case HttpTransact::SM_ACTION_TRANSFORM_READ:
07248     {
07249       t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
07250       do_api_callout();
07251       break;
07252     }
07253 
07254   case HttpTransact::SM_ACTION_READ_PUSH_HDR:
07255     {
07256       setup_push_read_response_header();
07257       break;
07258     }
07259 
07260   case HttpTransact::SM_ACTION_STORE_PUSH_BODY:
07261     {
07262       setup_push_transfer_to_cache();
07263       tunnel.tunnel_run();
07264       break;
07265     }
07266 
07267   case HttpTransact::SM_ACTION_CACHE_PREPARE_UPDATE:
07268     {
07269       ink_assert(t_state.api_update_cached_object == HttpTransact::UPDATE_CACHED_OBJECT_CONTINUE);
07270       do_cache_prepare_update();
07271       break;
07272     }
07273   case HttpTransact::SM_ACTION_CACHE_ISSUE_UPDATE:
07274     {
07275       if (t_state.api_update_cached_object == HttpTransact::UPDATE_CACHED_OBJECT_ERROR) {
07276         t_state.cache_info.object_read = NULL;
07277         cache_sm.close_read();
07278       }
07279       issue_cache_update();
07280       call_transact_and_set_next_state(NULL);
07281       break;
07282     }
07283 
07284 #ifdef PROXY_DRAIN
07285   case HttpTransact::SM_ACTION_DRAIN_REQUEST_BODY:
07286     {
07287       do_drain_request_body();
07288       break;
07289     }
07290 #endif /* PROXY_DRAIN */
07291 
07292   case HttpTransact::SM_ACTION_CONTINUE:
07293     {
07294       ink_release_assert(!"Not implemented");
07295     }
07296 
07297   default:
07298     {
07299       ink_release_assert(!"Unknown next action");
07300     }
07301   }
07302 }
07303 
07304 
07305 void
07306 clear_http_handler_times()
07307 {
07308 }
07309 
07310 
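// Starts the congestion-control table lookup for the current request.  If the
// lookup cannot complete synchronously, the returned Action is stashed in
// pending_action and false is returned so the caller waits for
// CONGESTION_EVENT_CONTROL_LOOKUP_DONE instead of proceeding.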
07311 bool
07312 HttpSM::do_congestion_control_lookup()
07313 {
07314   ink_assert(pending_action == NULL);
07315 
07316   Action *congestion_control_action_handle = get_congest_entry(this, &t_state.request_data, &t_state.pCongestionEntry);
07317   if (congestion_control_action_handle != ACTION_RESULT_DONE) {
07318     pending_action = congestion_control_action_handle;
07319     historical_action = pending_action;
07320     return false;
07321   }
07322   return true;
07323 }
07324 
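// Handler for the asynchronous congestion-control lookup.  On a successful
// lookup the saved next action (ORIGIN_SERVER_OPEN or ORIGIN_SERVER_RAW_OPEN)
// is restored and set_next_state() is re-entered; any other event cancels the
// pending action and falls through to the corresponding server-open handler.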
07325 int
07326 HttpSM::state_congestion_control_lookup(int event, void *data)
07327 {
07328   STATE_ENTER(&HttpSM::state_congestion_control_lookup, event);
07329   if (event == CONGESTION_EVENT_CONTROL_LOOKUP_DONE) {
07330     pending_action = NULL;
07331     t_state.next_action = t_state.congest_saved_next_action;
07332     t_state.transact_return_point = NULL;
07333     set_next_state();
07334   } else {
07335     if (pending_action != NULL) {
07336       pending_action->cancel();
07337       pending_action = NULL;
07338     }
07339     if (t_state.congest_saved_next_action == HttpTransact::SM_ACTION_ORIGIN_SERVER_OPEN) {
07340       return state_http_server_open(event, data);
07341     } else if (t_state.congest_saved_next_action == HttpTransact::SM_ACTION_ORIGIN_SERVER_RAW_OPEN) {
07342       return state_raw_http_server_open(event, data);
07343     }
07344   }
07345   return 0;
07346 }
07347 
07348 
07349 //YTS Team, yamsat Plugin
07350 
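// Decides whether to follow a redirect internally.  Redirection must be
// enabled and under the configured limit; if a redirect is required, the
// transaction is logged (when logging is enabled) with a *_REDIRECT Squid
// code and redirect_request() is called with either the plugin-supplied
// redirect_url or the Location header from the response.  Otherwise
// redirection is turned off for the remainder of this transaction.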
07351 void
07352 HttpSM::do_redirect()
07353 {
07354   DebugSM("http_redirect", "[HttpSM::do_redirect]");
07355   if (!enable_redirection || redirection_tries >= HttpConfig::m_master.number_of_redirections) {
07356     tunnel.deallocate_redirect_postdata_buffers();
07357     return;
07358   }
07359 
07360   // if redirect_url is set by a user's plugin, ATS will redirect to this URL anyway.
07361   if (is_redirect_required()) {
07362     if (redirect_url != NULL || t_state.hdr_info.client_response.field_find(MIME_FIELD_LOCATION, MIME_LEN_LOCATION)) {
07363       if (Log::transaction_logging_enabled() && t_state.api_info.logging_enabled) {
07364         LogAccessHttp accessor(this);
07365         if (redirect_url == NULL) {
07366           if (t_state.squid_codes.log_code == SQUID_LOG_TCP_HIT)
07367             t_state.squid_codes.log_code = SQUID_LOG_TCP_HIT_REDIRECT;
07368           else
07369             t_state.squid_codes.log_code = SQUID_LOG_TCP_MISS_REDIRECT;
07370         } else {
07371           if (t_state.squid_codes.log_code == SQUID_LOG_TCP_HIT)
07372             t_state.squid_codes.log_code = SQUID_LOG_TCP_HIT_X_REDIRECT;
07373           else
07374             t_state.squid_codes.log_code = SQUID_LOG_TCP_MISS_X_REDIRECT;
07375         }
07376 
07377         int ret = Log::access(&accessor);
07378 
07379         if (ret & Log::FULL) {
07380           DebugSM("http", "[update_stats] Logging system indicates FULL.");
07381         }
07382         if (ret & Log::FAIL) {
07383           Log::error("failed to log transaction for at least one log object");
07384         }
07385       }
07386 
07387       if (redirect_url != NULL) {
07388         redirect_request(redirect_url, redirect_url_len);
07389         ats_free((void*)redirect_url);
07390         redirect_url = NULL;
07391         redirect_url_len = 0;
07392         HTTP_INCREMENT_DYN_STAT(http_total_x_redirect_stat);
07393       }
07394       else {
07395         // get the location header and setup the redirect
07396         int redir_len;
07397         char *redir_url =
07398           (char *) t_state.hdr_info.client_response.value_get(MIME_FIELD_LOCATION, MIME_LEN_LOCATION, &redir_len);
07399         redirect_request(redir_url, redir_len);
07400       }
07401 
07402     } else {
07403       enable_redirection = false;
07404     }
07405   } else {
07406     enable_redirection = false;
07407   }
07408 
07409 }
07410 
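// Rewrites the client request so the state machine can transparently follow
// the redirect: the target URL is parsed into t_state.redirect_info, the
// client URL is swapped for it (the original is preserved in original_url),
// the previous response and server request are discarded, DNS/parent/cache
// state is reset so a fresh lookup is done against the new origin, and the
// Host header is rebuilt from the new URL's host and port (falling back to
// the previous origin host when the new URL has none).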
07411 void
07412 HttpSM::redirect_request(const char *redirect_url, const int redirect_len)
07413 {
07414   DebugSM("http_redirect", "[HttpSM::redirect_request]");
07415   // get a reference to the client request header and client url and check to see if the url is valid
07416   HTTPHdr & clientRequestHeader = t_state.hdr_info.client_request;
07417   URL & clientUrl = *clientRequestHeader.url_get();
07418   if (!clientUrl.valid()) {
07419     return;
07420   }
07421 
07422   t_state.redirect_info.redirect_in_process = true;
07423 
07424   // set the passed in location url and parse it
07425   URL & redirectUrl = t_state.redirect_info.redirect_url;
07426   if (!redirectUrl.valid()) {
07427     redirectUrl.create(NULL);
07428   }
07429 
07430   // reset the path from previous redirects (if any)
07431   t_state.redirect_info.redirect_url.path_set(NULL, 0);
07432 
07433   // redirectUrl.user_set(redirect_url, redirect_len);
07434   redirectUrl.parse(redirect_url, redirect_len);
07435 
07436   // copy the client url to the original url
07437   URL & origUrl = t_state.redirect_info.original_url;
07438   if (!origUrl.valid()) {
07439     origUrl.create(NULL);
07440     origUrl.copy(&clientUrl);
07441   }
07442   // copy the redirect url to the client url
07443   clientUrl.copy(&redirectUrl);
07444 
07445   //(bug 2540703) Clear the previous response if we will attempt the redirect
07446   if (t_state.hdr_info.client_response.valid()) {
07447     // XXX - doing a destroy() for now; we can do a fields_clear() if we have performance issues
07448     t_state.hdr_info.client_response.destroy();
07449   }
07450 
07451 
07452   bool valid_origHost = true;
07453   int origHost_len, origMethod_len;
07454   char* tmpOrigHost = (char *) t_state.hdr_info.server_request.value_get(MIME_FIELD_HOST, MIME_LEN_HOST, &origHost_len);
07455   char origHost[origHost_len + 1];
07456 
07457   if (tmpOrigHost) {
07458     memcpy(origHost, tmpOrigHost, origHost_len);
07459     origHost[origHost_len] = '\0'; // terminate only when the Host value exists
07460   } else {
07461     valid_origHost = false; // origHost stays unused; it is only read when valid_origHost is true
07462   }
07463   int origPort = t_state.hdr_info.server_request.port_get();
07464 
07465   char *tmpOrigMethod = (char *) t_state.hdr_info.server_request.method_get(&origMethod_len);
07466   char origMethod[origMethod_len + 1];
07467 
07468   if (tmpOrigMethod)
07469     memcpy(origMethod, tmpOrigMethod, origMethod_len);
07470   else
07471     valid_origHost = false;
07472 
07473   int scheme = t_state.next_hop_scheme;
07474   int scheme_len = hdrtoken_index_to_length(scheme);
07475   const char* next_hop_scheme = hdrtoken_index_to_wks(scheme);
07476   char scheme_str[scheme_len+1];
07477 
07478   if (next_hop_scheme)
07479     memcpy(scheme_str, next_hop_scheme, scheme_len);
07480   else
07481     valid_origHost = false;
07482 
07483   t_state.hdr_info.server_request.destroy();
07484   // we want to close the server session
07485   t_state.api_release_server_session = true;
07486   t_state.parent_result.r = PARENT_UNDEFINED;
07487   t_state.request_sent_time = 0;
07488   t_state.response_received_time = 0;
07489   t_state.cache_info.write_lock_state = HttpTransact::CACHE_WL_INIT;
07490   t_state.next_action = HttpTransact::SM_ACTION_REDIRECT_READ;
07491   // we have a new OS and need to have DNS lookup the new OS
07492   t_state.dns_info.lookup_success = false;
07493   t_state.force_dns = false;
07494 
07495   bool noPortInHost = HttpConfig::m_master.redirection_host_no_port;
07496 
07497   // check to see if the client request passed a host header, if so copy the host and port from the redirect url and
07498   // make a new host header
07499   if (t_state.hdr_info.client_request.presence(MIME_PRESENCE_HOST)) {
07500     int host_len;
07501     const char *host = clientUrl.host_get(&host_len);
07502 
07503     if (host != NULL) {
07504       int port = clientUrl.port_get();
07505       char buf[host_len + 7];
07506 
07507       int redirectSchemeLen;
07508       const char* redirectScheme = clientUrl.scheme_get(&redirectSchemeLen);
07509       if (redirectScheme == NULL) {
07510         clientUrl.scheme_set(scheme_str, scheme_len);
07511         DebugSM("http_redirect", "[HttpSM::redirect_request] host without scheme %.*s", host_len, host);
07512       }
07513 
07514       if (noPortInHost) {
07515         int redirectSchemeIdx = clientUrl.scheme_get_wksidx();
07516 
07517         bool defaultPort = (((redirectSchemeIdx == URL_WKSIDX_HTTP) && (port == 80)) ||
07518             ((redirectSchemeIdx == URL_WKSIDX_HTTPS) && (port == 443)));
07519 
07520         if (!defaultPort) noPortInHost = false;
07521       }
07522 
07523       ink_strlcpy(buf, host, host_len + 1);
07524 
07525       if (!noPortInHost) {
07526         char port_buf[6]; // handle up to 5 digit ports
07527         buf[host_len++] = ':';
07528 
07529         host_len += ink_small_itoa(port, port_buf, sizeof(port_buf));
07530         ink_strlcat(buf, port_buf, sizeof(buf));
07531       }
07532 
07533       t_state.hdr_info.client_request.m_target_cached = false;
07534       t_state.hdr_info.client_request.value_set(MIME_FIELD_HOST, MIME_LEN_HOST, buf, host_len);
07535     } else {
07536       // the client request didn't have a host, so use the current origin host
07537       if (valid_origHost)
07538       {
07540         DebugSM("http_redirect", "[HttpSM::redirect_request] keeping client request host %s://%s", next_hop_scheme, origHost);
07541         char* origHost1 = strtok(origHost, ":");
07542         origHost_len = strlen(origHost1);
07543         int origHostPort_len = origHost_len;
07544         char buf[origHostPort_len + 7];
07545         ink_strlcpy(buf, origHost1, origHost_len + 1);
07546 
07547         if (noPortInHost) {
07548           int redirectSchemeIdx = t_state.next_hop_scheme;
07549 
07550           bool defaultPort = (((redirectSchemeIdx == URL_WKSIDX_HTTP) && (origPort == 80)) ||
07551               ((redirectSchemeIdx == URL_WKSIDX_HTTPS) && (origPort == 443)));
07552 
07553           if (!defaultPort) noPortInHost = false;
07554         }
07555 
07556         if (!noPortInHost) {
07557           char port_buf[6]; // handle up to 5 digit ports
07558           buf[origHostPort_len++] = ':';
07559           origHostPort_len += ink_small_itoa(origPort, port_buf, sizeof(port_buf));
07560           ink_strlcat(buf, port_buf, sizeof(buf));
07561         }
07562 
07563         url_nuke_proxy_stuff(clientUrl.m_url_impl);
07564         url_nuke_proxy_stuff(t_state.hdr_info.client_request.m_url_cached.m_url_impl);
07565         t_state.hdr_info.client_request.method_set(origMethod, origMethod_len);
07566         if (noPortInHost)
07567           t_state.hdr_info.client_request.value_set(MIME_FIELD_HOST, MIME_LEN_HOST, buf, origHost_len);
07568         else
07569           t_state.hdr_info.client_request.value_set(MIME_FIELD_HOST, MIME_LEN_HOST, buf, origHostPort_len);
07570         t_state.hdr_info.client_request.m_target_cached = false;
07571         clientUrl.scheme_set(scheme_str, scheme_len);
07572       } else {
07573         // the server request didn't have a host, so remove it from the headers
07574         t_state.hdr_info.client_request.field_delete(MIME_FIELD_HOST, MIME_LEN_HOST);
07575       }
07576     }
07577 
07578   }
07579 
07580   DUMP_HEADER("http_hdrs", &t_state.hdr_info.client_request, sm_id, "Framed Client Request..checking");
07581 }
07582 
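// set_http_schedule()/get_http_schedule() let a plugin continuation be
// re-dispatched on this state machine's thread: the continuation is stored in
// schedule_cont, and get_http_schedule() tries to grab its mutex, retrying in
// 10ms if the lock is contended, before handing it the event.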
07583 void
07584 HttpSM::set_http_schedule(Continuation *contp)
07585 {
07586   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::get_http_schedule);
07587   schedule_cont = contp;
07588 }
07589 
07590 int
07591 HttpSM::get_http_schedule(int event, void * /* data ATS_UNUSED */)
07592 {
07593   bool plugin_lock;
07594   Ptr <ProxyMutex> plugin_mutex;
07595   if (schedule_cont->mutex) {
07596     plugin_mutex = schedule_cont->mutex;
07597     plugin_lock = MUTEX_TAKE_TRY_LOCK(schedule_cont->mutex, mutex->thread_holding);
07598 
07599     if (!plugin_lock) {
07600       HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::get_http_schedule);
07601       ink_assert(pending_action == NULL);
07602       pending_action = mutex->thread_holding->schedule_in(this, HRTIME_MSECONDS(10));
07603       return 0;
07604     }
07605   } else {
07606     plugin_lock = false;
07607   }
07608 
07609   // dispatch the event to the scheduled plugin continuation
07610   schedule_cont->handleEvent(event, this);
07611   if (plugin_lock) {
07612     Mutex_unlock(plugin_mutex, mutex->thread_holding);
07613   }
07614 
07615   return 0;
07616 }
07617 
07618 bool
07619 HttpSM::set_server_session_private(bool private_session)
07620 {
07621   if (server_session != NULL) {
07622     server_session->private_session = private_session;
07623     return true;
07624   }
07625   return false;
07626 }
07627 
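// A server session is "private" when it must not be shared through the
// session manager (for example, when set_server_session_private() has marked
// it so).  Check the bound server session first, then any server session
// still attached to the client session.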
07628 inline bool
07629 HttpSM::is_private()
07630 {
07631     bool res = false;
07632     if (server_session) {
07633         res = server_session->private_session;
07634     } else if (ua_session) {
07635         HttpServerSession * ss = ua_session->get_server_session();
07636         if (ss) {
07637             res = ss->private_session;
07638         }
07639     }
07640     return res;
07641 }
07642 
07643 // check to see if redirection is enabled and we are under the maximum number of redirection tries, or if a plugin has enabled redirection
07644 inline bool
07645 HttpSM::is_redirect_required()
07646 {
07647   bool redirect_required = (enable_redirection && (redirection_tries <= HttpConfig::m_master.number_of_redirections));
07648 
07649   DebugSM("http_redirect", "is_redirect_required %u", redirect_required);
07650 
07651   if (redirect_required == true) {
07652     HTTPStatus status = t_state.hdr_info.client_response.status_get();
07653     // check to see if the response from the origin was a redirect (300, 301, 302, 303, 305, or 307)
07654     switch (status)
07655     {
07656     case HTTP_STATUS_MULTIPLE_CHOICES:     //300
07657     case HTTP_STATUS_MOVED_PERMANENTLY:    //301
07658     case HTTP_STATUS_MOVED_TEMPORARILY:    //302
07659     case HTTP_STATUS_SEE_OTHER:            //303
07660     case HTTP_STATUS_USE_PROXY:            //305
07661     case HTTP_STATUS_TEMPORARY_REDIRECT:   //307
07662       redirect_required = true;
07663       break;
07664     default:
07665       redirect_required = false;
07666       break;
07667     }
07668 
07669     // if redirect_url is set by a user's plugin, ATS will redirect to this URL anyway.
07670     if (redirect_url != NULL) {
07671       redirect_required = true;
07672     }
07673   }
07674   return redirect_required;
07675 }
