# Copyright (c) 2010 Flowerfire, Inc. All Rights Reserved.

# Sawmill log-format plug-in for Limelight Networks' Flash Media Server logs
# (a non-W3C variant of the Macromedia/Adobe Flash Media Server access log).
limelight_flash_media_server = {

  plugin_version = "2.2"

  # Changelog:
  # 2006-04-07 - Created initial plug-in, based on log from Macromedia Flash Media Server 2.0.1 r27.
  # 2006-08-29 - GMF - 1.0.1beta - Added differencing of cs_bytes
  # 2006-09-04 - GMF - 1.0.2beta - Added tracking of x-duration
  # 2006-11-29 - gas - 1.1beta - converted to non-w3c format (with new filename) as Limelight
  #   (www.limelightnetworks.com) are not adding a header that we can easily parse (multiple
  #   spaces in between header field names)
  # 2007-06-22 - KBB - 1.2beta - Changed sessions to treat each clip as a session by adding a login and
  #   logout event.
  # 2007-09-14 - 1.2 - KBB - renumbered per new beta policy
  # 2007-12-03 - 1.3 - KBB - Cloned fix_cs_bytes for sc_bytes, sc_stream_bytes and cs_stream_bytes,
  #   which use the same calculation. Improved calculations based on information about how
  #   connections end. Fixed a bug where field values were skewed because some fields gathered by the
  #   parsing regular expression were not in the list. Instead of changing the expression, I added the
  #   fields as placeholders with comments.
  # 2008-04-22 - 1.4 - KBB - Check for x_event eq disconnect and x_category eq session for more accurate
  #   session reporting. Switched to collected fields instead of nodes for bytes calculations which
  #   increases database build efficiency.
  # 2009-01-21 - 1.5 - GMF - Switched to use log.filter_preprocessor for parsing, to handle arbitrary W3C
  #   headers, but still to remove tz field, which is in the header but not the data.
  # 2009-02-20 - 1.5.1 - GMF - Enhanced filter_preprocessor to support headers with tabs
  # 2011-05-17 - 1.5.2 - MSG - Edited info lines.
  # 2012-02-06 - 1.6 - MSG - Removed final step which was throwing an error referring to session paths.
  # 2013-07-16 - 2.0 - GMF - Switched over to Flash-style snapon approach to numerical fields, and Media Reports.
# 2013-08-28 - 2.1 - gas - Added if fieldname ne '' around create_log_field(), in case the fieldname is empty (like the create_database_field() # 2015-02-12 - 2.2 - gas - added client_stream_id calculation for concurrent_streams_wowza snapon requirements info.1.manufacturer = "Limelight Networks" info.1.device = "Flash Media Server" info.1.version.1 = "" # The name of the log format log.format.format_label = "Limelight Flash Media Server Log Format" log.miscellaneous.log_data_type = "http_access" log.miscellaneous.log_format_type = "media_server" # The log is in this format if any of the first ten lines match this regular expression log.format.autodetect_expression = `matches_regular_expression(volatile.log_data_line, '^#Software: Macromedia Flash') or matches_regular_expression(volatile.log_data_line, '^(app-stop|connect|connect-pending|disconnect|play|stop) (application|session|stream) ')` log.format.collected_entry_lifespan = 100000 # The format of dates and times in this log log.format.date_format = "auto" log.format.time_format = "auto" # log.format.ignore_format_lines = true # Fields are separated by tabs log.format.field_separator = " " # Don't time out sessions, discard them for being long, or remove reloads statistics.miscellaneous = { maximum_session_duration = "0" session_timeout = "0" remove_reloads_from_sessions = "false" } # statistics.miscellaneous log.fields = { # x_event = "" # x_category = "" # date = "" # time = "" # tz = "" # x_ctx = "" # s_ip = "" # x_pid = "" # x_cpu_load = "" # x_mem_load = "" # x_adaptor = "" # x_vhost = "" # x_app = "" # x_appinst = "" # x_duration = "" # x_status = "" # c_ip.type = "host" # c_proto = "" # s_uri = "" # cs_uri_stem.type = "page" # cs_uri_query = "" # c_referrer = "" # c_user_agent = "" # c_client_id = "" # cs_bytes = "" # sc_bytes = "" # c_connect_type = "" # not in database # x_sname = "" # x_sname_query = "" # x_suri_query = "" # x_suri_stem = "" # x_suri = "" # x_file_name = "" # x_file_ext = "" # 
x_file_size = "" # x_file_length = "" # x_spos = "" # cs_stream_bytes = "" # sc_stream_bytes = "" # x_service_name = "" # not in database # x_sc_qos_bytes = "" # not in database # x_comment = "" # not in database # This field contains c_client_id plus x_stream_id client_stream_id = "" session_events = "" session_event_type = "" session_id = "" } # log.fields # This handles #Fields lines, and creates log and database fields from them log.filter_preprocessor = ` if (matches_regular_expression(current_log_line(), '^#Fields:[ ]+(.*)$')) then ( string fields = $1; string fieldname; v.logfieldindex = 1; string numerical_fields = "profiles." . internal.profile_name . ".database.numerical_fields"; #echo("Fields: " . fields); # This subroutine creates a database field subroutine(create_database_field(string fieldname), ( debug_message("create_database_field(" . fieldname . ")\n"); string databasefieldpath = "profiles." . internal.profile_name . ".database.fields." . fieldname; (databasefieldpath . "") = ""; node databasefield = databasefieldpath; # set_subnode_value(databasefield, "label", fieldname); databasefield; )); subroutine(create_log_field(string fieldname, string type, bool withindex), ( # echo("create_log_field(" . fieldname . "; type=" . type . ")"); debug_message("create_log_field(" . fieldname . "; type=" . type . ")\n"); string logfieldpath = "profiles." . internal.profile_name . ".log.fields." . fieldname; (logfieldpath . "") = ""; node logfield = logfieldpath; # set_subnode_value(logfield, "label", fieldname); if (withindex) then ( set_subnode_value(logfield, "index", v.logfieldindex); v.logfieldindex++; ); set_subnode_value(logfield, "subindex", 0); if (type ne '') then set_subnode_value(logfield, "type", type); logfield; )); # Assume there isn't a localtime field until we see one. v.parse_localtime = false; # Remove tz field (it's in the header, but not in the data), e.g. 
#Fields: x-event x-category date time tz c-connect-type x-ctx x-vhost x-app x-appinst x-duration x-status c-ip c-proto s-uri cs-uri-stem cs-uri-query c-referrer c-user-agent c-client-id cs-bytes sc-bytes x-sname x-sname-query x-suri-query x-suri-stem x-suri x-file-ext x-file-size x-file-length x-spos cs-stream-bytes sc-stream-bytes x-sc-qos-bytes x-comment #stop stream 2008-12-05 12:04:29 normal something_420_236_500kbs-final _defaultVHost_ user1 _definst_ 1 200 204.96.7.34 rtmp rtmp://somewhere.somemone:1935/user/_definst_ rtmp://user.flash.somecdn.net:1935/user/_definst_ - http://www2.userraig.com/SomeStream/volumetrics.swf WIN 9,0,124,0 4842288006662599506 3337 132081 SomeStream_420_236_500kbs-final - - rtmp://user.flash.somecdn.net:1935/user/_definst_/SomeStream_420_236_500kbs-final.flv rtmp://user.flash.somecdn.net:1935/user/_definst_/SomeStream_420_236_500kbs-final.flv flv 42332644 519.471000 8097 0 261652 3342 - fields = replace_all(fields, ' tz ', ' '); # Extract the fields on at a time while (matches_regular_expression(fields, '^([^ ]+)[ ]+([^ ].*)$')) ( string unconverted_fieldname = $1; fields = $2; # Clean up the field name fieldname = ''; for (int i = 0; i < length(unconverted_fieldname); i++) ( string c = lowercase(substr(unconverted_fieldname, i, 1)); if (!matches_regular_expression(c, '^[a-z0-9]$')) then c = '_'; fieldname .= c; ); while (matches_regular_expression(fieldname, '^(.*)_$')) fieldname = $1; # Get the log field type string log_field_type = ''; if (fieldname eq 'cs_uri_path') then log_field_type = 'page'; if (fieldname eq 'cs_user_agent') then log_field_type = 'agent'; # Create the log field if (fieldname ne '') then create_log_field(fieldname, log_field_type, true); if (fieldname eq "localtime") then v.parse_localtime = true; # If we're creating a profile, create the database fields too. 
if (node_exists("volatile.creating_profile")) then ( # Handle localtime by creating date_time and derived database fields if (fieldname eq "localtime") then ( create_log_field('date', '', false); create_log_field('time', '', false); create_database_field('date_time'); create_database_field('day_of_week'); create_database_field('hour_of_day'); # ("profiles." . internal.profile_name . ".log.parsing_filters.parse_localtime.disabled") = false; ); # if localtime # Handle date by creating date_time and derived database fields else if (fieldname eq "date") then ( create_log_field('localtime', '', false); # placeholder - 7/Nov/2006 - KBB create_database_field('date_time'); create_database_field('day_of_week'); create_database_field('hour_of_day'); # ("profiles." . internal.profile_name . ".log.parsing_filters.parse_localtime.disabled") = true; ); # if date else if (fieldname eq "time") then ( create_database_field('date_time'); create_database_field('day_of_week'); create_database_field('hour_of_day'); # ("profiles." . internal.profile_name . ".log.parsing_filters.parse_localtime.disabled") = true; ); # if time # Create derived field for agent else if (fieldname eq "cs_user_agent") then ( create_database_field('operating_system'); create_database_field('web_browser'); ); # Create derived file type field else if (fieldname eq "cs_uri_path") then ( create_database_field('file_type'); ); # Don't add a database field for numerical fields # else if (subnode_exists('database.fields', fieldname)) then ( else if (subnode_exists(numerical_fields, fieldname)) then ( debug_message("Not adding numerical field: " . fieldname . "\n"); ); # Create a normal database field else if (fieldname ne '') then create_database_field(fieldname); ); # if creating profile #echo("now fields=" . 
fields); ); # while another field # Don't parse the #Fields line as a data line 'reject'; ); # if #Fields # Don't parse any other # lines as data lines else if (starts_with(current_log_line(), '#')) then ( 'reject'; ); ` # log.filter_initialization = ` #string visitor_id; ## For bytes calculations #v.last_cs_bytes = ""; #v.last_sc_bytes = ""; #v.last_cs_stream_bytes = ""; #v.last_sc_stream_bytes = ""; #float cs_bytes_for_db; #float sc_bytes_for_db; #float cs_stream_bytes_for_db; #float sc_stream_bytes_for_db; ## For sessions #int login_date_time_epoc; #int logout_date_time_epoc; #string logout_date_time; #string session_id; #int session_id_counter = 0; #int date_offset_seconds = log.processing.date_offset * (60*60); #` # log.parsing_filters = { calculate_client_stream_id = { label = "calculate client_stream_id" value = ` #if (x_stream_id eq '-') then #client_stream_id = '-' else #client_stream_id = c_client_id . '-' . x_stream_id client_stream_id = c_client_id ` requires_fields = { c_client_id = true # x_stream_id = true } # requires_fields } # calculate_client_stream_id # # # Logs will have c_ip or c_client_id or both. Set visitor_id, used in other filters # # based on whichever is available. c_client_id will be used if both exist. # # set_visitor_to_c_ip = { # value = `visitor_id = replace_all(c_ip, '.', '_');` # requires_fields = { # c_ip = true # } # } # set_visitor_to_c_ip # # # Keep this filter 2nd. c_client_id is the more precise id. # set_visitor_to_c_client_id = { # value = `visitor_id = c_client_id;` # requires_fields = { # c_client_id = true # } # } # set_visitor_to_c_client_id # # # This filter adds a "login" event at the logged time, so sessions can be calculated based # # on connection time. The "normal" event will be added normally, but this also subtracts the # # duration from the date and time, so it logs the event at the time it *connected*, and the # # logout at the time it *disconnected*. 
#    add_login_event = {
#
#      value = `
#
#if (x_event eq "disconnect" and x_category eq "session" and
#    visitor_id ne '-' and x_duration ne '-') then (
#
#  # Compute the session id
#  session_id = visitor_id . '_' . session_id_counter;
#  session_id_counter++;
#
#  # Add the login event. This has the same cs_uri_stem as the logout event, so it can be
#  # correllated that way.
#  set_collected_field('', 'date', date);
#  set_collected_field('', 'time', time);
#  set_collected_field('', 'session_event_type', 'login');
#  set_collected_field('', 'session_id', session_id);
#  set_collected_field('', 'cs_uri_stem', cs_uri_stem);
#  set_collected_field('', 'session_events', 1);
#  set_collected_field('', 'events', 0);
#  accept_collected_entry('', false);
#
#  # Compute the date_time of the login event.
#  # Note that we need to add date_offset_seconds to apply the date_offset value.
#  login_date_time_epoc = date_time_to_epoc(normalize_date(date, 'auto') . ' ' . time);
#  login_date_time_epoc += date_offset_seconds;
#  logout_date_time_epoc = login_date_time_epoc + x_duration;
#  logout_date_time = epoc_to_date_time(logout_date_time_epoc);
#
#  # Set up to make the next event a logout
#  session_event_type = '(logout)';
#  session_events = 1;
#);
##events = 1; # This happens in the log filter so all events are marked
#`
#      requires_fields = {
#        date = true
#        time = true
#        x_duration = true
#        cs_uri_stem = true
#      }
#
#    } # add_login_event
#
#    set_logout_date_time = `
#
#if (session_event_type eq '(logout)') then (
#
#  date_time = logout_date_time; # must do these separately, since this will be re-derived
#  if (matches_regular_expression(logout_date_time, '^([^ ]+) ([^ ]+)$')) then (
#    date = $1;
#    time = $2;
#  );
#);
#`
#
#    # Fix the cs_bytes field by subtracting this value from the previous one (it's a running total,
#    # which otherwise will be aggregated to give too-large numbers).
#    fix_cs_bytes = {
#      value = `
#if (visitor_id ne "(empty)" and visitor_id ne "-") then (
#
#  # If there was a previous value, use the difference in the database entry
#  v.last_cs_bytes = get_collected_field(visitor_id, 'last_cs_bytes');
#  #if (subnode_exists('v.last_cs_bytes', visitor_id)) then (
#  if (v.last_cs_bytes ne '') then (
#
#    cs_bytes_for_db = 0.0 + cs_bytes - v.last_cs_bytes;
#    if (cs_bytes_for_db < 0) then (
#      cs_bytes_for_db = cs_bytes;
#    );
#  );
#  else (
#    cs_bytes_for_db = cs_bytes;
#  );
#
#  # Remember the current cs_bytes value for a later event for this visitor
#  if (x_event eq "disconnect") then (
#    #set_subnode_value('v.last_cs_bytes', visitor_id, 0);
#    set_collected_field(visitor_id, 'last_cs_bytes', 0);
#  );
#  else (
#    #set_subnode_value('v.last_cs_bytes', visitor_id, cs_bytes);
#    set_collected_field(visitor_id, 'last_cs_bytes', cs_bytes);
#  );
#
#  # In the database, the cs_bytes field should be the difference
#  cs_bytes = cs_bytes_for_db;
#
#); # if visitor_id
#else (
#  cs_bytes = 0;
#);
#`
#      requires_fields = {
#        cs_bytes = true
#        x_event = true
#      }
#    } # fix_cs_bytes
#
#    # Fix the sc_bytes field by subtracting this value from the previous one (it's a running total,
#    # which otherwise will be aggregated to give too-large numbers).
#    fix_sc_bytes = {
#      value = `
#if (visitor_id ne "(empty)" and visitor_id ne "-") then (
#
#  # If there was a previous value, use the difference in the database entry
#  v.last_sc_bytes = get_collected_field(visitor_id, 'last_sc_bytes');
#  #if (subnode_exists('v.last_sc_bytes', visitor_id)) then (
#  if (v.last_sc_bytes ne '') then (
#
#    #sc_bytes_for_db = 0.0 + sc_bytes - node_value(subnode_by_name('v.last_sc_bytes', visitor_id));
#    sc_bytes_for_db = 0.0 + sc_bytes - v.last_sc_bytes;
#    if (sc_bytes_for_db < 0) then (
#      sc_bytes_for_db = sc_bytes;
#    );
#  );
#  else (
#    sc_bytes_for_db = sc_bytes;
#  );
#
#  # Remember the current sc_bytes value for a later event for this visitor
#  if (x_event eq "disconnect") then (
#    #set_subnode_value('v.last_sc_bytes', visitor_id, 0);
#    set_collected_field(visitor_id, 'last_sc_bytes', 0);
#  );
#  else (
#    #set_subnode_value('v.last_sc_bytes', visitor_id, sc_bytes);
#    set_collected_field(visitor_id, 'last_sc_bytes', sc_bytes);
#  );
#
#  # In the database, the sc_bytes field should be the difference
#  sc_bytes = sc_bytes_for_db;
#
#); # if visitor_id
#else (
#  sc_bytes = 0;
#);
#`
#      requires_fields = {
#        sc_bytes = true
#        x_event = true
#      }
#    } # fix_sc_bytes
#
#    # Fix the sc_stream_bytes field by subtracting this value from the previous one (it's a running total,
#    # which otherwise will be aggregated to give too-large numbers).
#    fix_sc_stream_bytes = {
#      value = `
#if (visitor_id ne "(empty)" and visitor_id ne "-") then (
#
#  # If there was a previous value, use the difference in the database entry
#  v.last_sc_stream_bytes = get_collected_field(visitor_id, 'last_sc_stream_bytes');
#  #if (subnode_exists('v.last_sc_stream_bytes', visitor_id)) then (
#  if (v.last_sc_stream_bytes ne '') then (
#
#    #sc_stream_bytes_for_db = 0.0 + sc_stream_bytes - node_value(subnode_by_name('v.last_sc_stream_bytes', visitor_id));
#    sc_stream_bytes_for_db = 0.0 + sc_stream_bytes - v.last_sc_stream_bytes;
#    if (sc_stream_bytes_for_db < 0) then (
#      sc_stream_bytes_for_db = sc_stream_bytes;
#    );
#  );
#  else (
#    sc_stream_bytes_for_db = sc_stream_bytes;
#  );
#
#  # Remember the current sc_stream_bytes value for a later event for this visitor
#  if (x_event eq "stop") then (
#    #set_subnode_value('v.last_sc_stream_bytes', visitor_id, 0);
#    set_collected_field(visitor_id, 'last_sc_stream_bytes', 0);
#  );
#  else (
#    #set_subnode_value('v.last_sc_stream_bytes', visitor_id, sc_stream_bytes);
#    set_collected_field(visitor_id, 'last_sc_stream_bytes', sc_stream_bytes);
#  );
#
#  # In the database, the sc_stream_bytes field should be the difference
#  sc_stream_bytes = sc_stream_bytes_for_db;
#
#); # if visitor_id
#else (
#  sc_stream_bytes = 0;
#);
#`
#      requires_fields = {
#        sc_stream_bytes = true
#        x_event = true
#      }
#    } # fix_sc_stream_bytes
#
#    # Fix the cs_stream_bytes field by subtracting this value from the previous one (it's a running total,
#    # which otherwise will be aggregated to give too-large numbers).
#    fix_cs_stream_bytes = {
#      value = `
#if (visitor_id ne "(empty)" and visitor_id ne "-") then (
#
#  # If there was a previous value, use the difference in the database entry
#  v.last_cs_stream_bytes = get_collected_field(visitor_id, 'last_cs_stream_bytes');
#  #if (subnode_exists('v.last_cs_stream_bytes', visitor_id)) then (
#  if (v.last_cs_stream_bytes ne '') then (
#
#    #cs_stream_bytes_for_db = 0.0 + cs_stream_bytes - node_value(subnode_by_name('v.last_cs_stream_bytes', visitor_id));
#    cs_stream_bytes_for_db = 0.0 + cs_stream_bytes - v.last_cs_stream_bytes;
#    if (cs_stream_bytes_for_db < 0) then (
#      cs_stream_bytes_for_db = cs_stream_bytes;
#    );
#  );
#  else (
#    cs_stream_bytes_for_db = cs_stream_bytes;
#  );
#
#  # Remember the current cs_stream_bytes value for a later event for this visitor
#  if (x_event eq "unpublish") then (
#    #set_subnode_value('v.last_cs_stream_bytes', visitor_id, 0);
#    set_collected_field(visitor_id, 'last_cs_stream_bytes', 0);
#  );
#  else (
#    #set_subnode_value('v.last_cs_stream_bytes', visitor_id, cs_stream_bytes);
#    set_collected_field(visitor_id, 'last_cs_stream_bytes', cs_stream_bytes);
#  );
#
#  # In the database, the cs_stream_bytes field should be the difference
#  cs_stream_bytes = cs_stream_bytes_for_db;
#
#); # if visitor_id
#else (
#  cs_stream_bytes = 0;
#);
#`
#      requires_fields = {
#        cs_stream_bytes = true
#        x_event = true
#      }
#    } # fix_cs_stream_bytes
#
#    set_duration = {
#      value = `
#if ((x_event eq "stop") and (x_category eq "stream")) then
#  stream_duration = x_duration;
#else if ((x_event eq "disconnect") and (x_category eq "session")) then
#  session_duration = x_duration;
#`
#      requires_fields = {
#        x_duration = true
#        x_event = true
#        x_category = true
#        stream_duration = true
#      }
#    } # set_duration
#
  } # log.parsing_filters

  log.filters = {

    mark_entry = {
      label = '$lang_admin.log_filters.mark_entry_label'
      comment = '$lang_admin.log_filters.mark_entry_comment'
      value = 'events = 1;'
    } # mark_entry

  } # log.filters
database.fields = { # x_event = "" # x_category = "" # date_time = "" # day_of_week = "" # hour_of_day = "" # tz = "" # x_ctx = "" # s_ip = "" # x_pid = "" # x_cpu_load = "" # x_mem_load = "" # x_adaptor = "" # x_vhost = "" # x_app = "" # x_appinst = "" # x_status = "" # c_ip = "" # c_proto = "" # s_uri = "" # cs_uri_stem = "" # cs_uri_query = "" # file_type = "" # worm = "" # location = "" # c_referrer = "" # c_user_agent = "" # c_client_id = "" # x_sname = "" # x_sname_query = "" # x_suri_query = "" # x_suri_stem = "" # x_suri = "" # x_file_name = "" # x_file_ext = "" # x_spos = "" session_event_type = "" session_id = "" } # database.fields log.field_options = { sessions_page_field = "session_event_type" sessions_visitor_id_field = "session_id" sessions_event_field = "session_events" } # log.field_options database.numerical_fields = { events = { default = true requires_log_field = false } # session_events = { # default = true # requires_log_field = false # } # session_events visitors = { default = false requires_log_field = true log_field = "c_client_id" type = "unique" } # visitors # sc_bytes = { # type = "int" # integer_bits = 64 # display_format_type = "bandwidth" # } # cs_bytes = { # type = "int" # integer_bits = 64 # display_format_type = "bandwidth" # } # sc_stream_bytes = { # type = "int" # integer_bits = 64 # display_format_type = "bandwidth" # } # # cs_stream_bytes = { # type = "int" # integer_bits = 64 # display_format_type = "bandwidth" # } # # x_file_size = { # type = "int" # integer_bits = 64 # display_format_type = "bandwidth" # } # # x_file_length = { # type = "int" # integer_bits = 64 # display_format_type = "bandwidth" # } # # x_duration = { # default = false # requires_log_field = false # type = "int" # integer_bits = 64 # display_format_type = "duration_compact" # } # # stream_duration = { # default = false # requires_log_field = false # type = "int" # integer_bits = 64 # display_format_type = "duration_compact" # } # # session_duration = { # 
default = false # requires_log_field = false # type = "int" # integer_bits = 64 # display_format_type = "duration_compact" # } # # stream_duration_per_event = { # default = false # log_field = "stream_duration" # requires_log_field = true # type = "int" # integer_bits = 64 # aggregation_method = "average" # average_denominator_field = "events" # display_format_type = duration_compact # } # # stream_duration_per_visitor = { # default = false # log_field = "stream_duration" # requires_log_field = true # type = "int" # integer_bits = 64 # aggregation_method = "average" # average_denominator_field = "visitors" # display_format_type = duration_compact # } } # database.numerical_fields create_profile_wizard_options = { # How the reports should be grouped in the report menu report_groups = { date_time_group = { tz = true } content_group = { cs_uri_stem = true cs_uri_query = true s_uri = true x_ctx = true x_sname = true x_sname_query = true x_suri_query = true x_suri_stem = true x_suri = true file_type = true } } # report_groups snapons = { # Attach a play_duration_wowza snapon to compute play_duration play_duration = { snapon = "play_duration_wowza" name = "play_duration" label = "$lang_stats.field_labels.play_duration" parameters = { date_time_field.parameter_value = "date_time" duration_field.parameter_value = "x_duration" session_id_field.parameter_value = "c_client_id" x_category_field.parameter_value = "x_category" x_event_field.parameter_value = "x_event" x_category_start_value.parameter_value = "stream" x_category_end_value.parameter_value = "stream" x_event_start_value.parameter_value = "play" x_event_start_value2.parameter_value = "unpause" x_event_end_value.parameter_value = "stop" x_event_end_value2.parameter_value = "pause" play_duration_name = { parameter_value = "$lang_stats.field_labels.play_duration" final_node_name = "play_duration" } } # parameters requires_log_fields = { x_duration = true date_time = true c_client_id = true } } # play_duration # Attach 
a cumulative_field_sum snapon to compute sc_stream_bytes sc_stream_bytes = { snapon = "cumulative_field_sum" name = "sc_stream_bytes_total" label = "$lang_admin.snapons.plugins.wowza_media_server.sc_stream_bytes" parameters = { date_time_field.parameter_value = "date_time" cumulative_field.parameter_value = "sc_stream_bytes" session_id_field.parameter_value = "c_client_id" cumulative_field_sum_name = { parameter_value = "$lang_admin.snapons.plugins.wowza_media_server.sc_stream_bytes" final_node_name = "sc_stream_bytes_total" } } # parameters requires_log_fields = { sc_stream_bytes = true date_time = true c_client_id = true # 2011-05-23 - GMF - Can't use this field--it doesn't always exist, and if it doesn't, this snapon won't be attached, resulting in the field being summed. # x_cf_client_id = true } } # sc_stream_bytes # 2013-02-08 - GMF - Removing this, because who really cares about client-to-server streaming bytes anyway? It can be attached as a snapon if they want it. # Attach a cumulative_field_sum snapon to compute cs_stream_bytes # 2013-04-11 - GMF - Turns out someone does care about it (SG). cs_stream_bytes = { snapon = "cumulative_field_sum" name = "cs_stream_bytes_total" label = "$lang_admin.snapons.plugins.wowza_media_server.cs_stream_bytes" parameters = { date_time_field.parameter_value = "date_time" cumulative_field.parameter_value = "cs_stream_bytes" session_id_field.parameter_value = "c_client_id" cumulative_field_sum_name = { parameter_value = "$lang_admin.snapons.plugins.wowza_media_server.cs_stream_bytes" final_node_name = "cs_stream_bytes_total" } } # parameters requires_log_fields = { cs_stream_bytes = true date_time = true c_client_id = true # 2011-05-23 - GMF - Can't use this field--it doesn't always exist, and if it doesn't, this snapon won't be attached, resulting in the field being summed. 
# x_cf_client_id = true } } # cs_stream_bytes # Attach a media_reports snapon media_reports = { snapon = "media_reports" name = "media_reports" label = "$lang_admin.snapons.media_reports.label" parameters = { user_field.parameter_value = "c_ip" # have_category_field.parameter_value = false # category_field.parameter_value = "category" # domain_field.parameter_value = "top_level_domain" duration_field.parameter_value = "play_duration" stream_name_field.parameter_value = "x_sname" have_stream_id_field.parameter_value = false #stream_id_field.parameter_value = "x_stream_id" client_ip_field.parameter_value = "c_ip" # Don't do the default concurrent connections analysis; Wowza has its own, better one (concurrent_streams_wowza, below) attach_concurrent_events.parameter_value = false } # parameters } # media_reports # Attach the referrer_analysis snapon referrer_analysis = { snapon = "referrer_analysis" name = "referrer_analysis" label = "$lang_admin.snapons.referrer_analysis.label" prompt_to_attach = true prompt_to_attach_default = false parameters = { referrer_field.parameter_value = "c_referrer" } # parameters requires_database_fields = { c_referrer = true } # requires_database_fields } # referrer_analysis # Attach a concurrent_streams_wowza snapon to compute concurrent connections concurrent_streams_wowza = { snapon = "concurrent_streams_wowza" name = "concurrent_streams_wowza" label = "$lang_admin.snapons.plugins.wowza_media_server.concurrent_streams" parameters = { date_time_field.parameter_value = "date_time" x_category_field.parameter_value = "x_category" x_event_field.parameter_value = "x_event" x_category_start_value.parameter_value = "stream" x_category_end_value.parameter_value = "stream" x_event_start_value.parameter_value = "play" x_event_end_value.parameter_value = "stop" x_event_end_value2.parameter_value = "pause" concurrent_streams_name = { parameter_value = "$lang_admin.snapons.plugins.wowza_media_server.concurrent_streams" final_node_name = 
"concurrent_streams" } # track_per_resource.parameter_value = true # resource_field.parameter_value = "x_sname" } # parameters requires_database_fields = { date_time = true x_category = true x_event = true } } # concurrent_streams_wowza } # snapons } # create_profile_wizard_options } # limelight_flash_media_server