Feb 16, 2014
 

Edit: This post is pretty old and Elasticsearch/Logstash/Kibana have evolved a lot since it was written.

I have been on a logging kick (or obsession) lately. See the previous series of posts.

I’ll start with a picture. This is seriously cool. If you’re running pfsense, you want this.
pfsense-kibana

BACKGROUND
My home network is pretty boring. Network is 192.168.1.0/24. Router is 192.168.1.254. Logstash is installed on 192.168.1.126.

I’m running pfsense version 2.1 and keeping current on updates.
pfsense-version

So in my pfsense admin gui, in Status -> System Logs, in the Settings tab, check the box for “Send log messages to remote syslog server”. In Server 1, I point it to my logstash server on port 514. so IP:Port.
pfsense-status_system_logs_settings

I’m forwarding everything because even if I don’t parse everything useful right away, it’s still easy to search.

I’m not using a distributed setup for this. I’m just taking the pfsense syslog input and parsing a few things and passing to elasticsearch on the same computer.

I’m sure you will want to add more to this configuration. Check out Grokdebug. I probably wouldn’t have been able to do this without Grokdebug.
http://grokdebug.herokuapp.com

And of course the logstash documentation!
http://logstash.net/docs/1.3.3/

Advertisement:

You will want to change if [host] =~ /192\.168\.1\.254/ to reflect the IP address of your pfsense box.

logstash.conf

# Listen for pfSense syslog traffic on both TCP and UDP.
# pfSense sends remote syslog over UDP by default; the tcp input is a
# fallback in case a forwarder is configured for TCP delivery.
# NOTE(review): binding to port 514 (<1024) requires Logstash to run with
# root privileges — confirm, or forward to a higher port instead.
input {
    tcp {
        type => syslog
        port => 514
    }
    udp {
        type => syslog
        port => 514
    }

}

filter {
	# Only process events whose source host is the pfSense box.
	# Change this IP to match your own pfSense address (see note above
	# in the post body).
	if [host] =~ /192\.168\.1\.254/ {
		# Split the raw syslog line into the priority value (evtid),
		# the timestamp (datetime), the program name (prog), and the
		# remaining message text (msg).
		grok {
			add_tag => [ "firewall" ]
			match => [ "message", "<(?<evtid>.*)>(?<datetime>(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]) (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:[0-5][0-9])) (?<prog>.*?): (?<msg>.*)" ]
		}
		# Syslog pads single-digit days with an extra space ("Feb  6");
		# collapse it so the date pattern below matches.
		mutate {
			gsub => ["datetime","  "," "]
		}
		# Use the syslog timestamp as the event's @timestamp.
		date {
			match => [ "datetime", "MMM dd HH:mm:ss" ]
		}
		# Replace the full raw line with just the message body.
		mutate {
			replace => [ "message", "%{msg}" ]
		}
		# Drop the intermediate fields now that they've been consumed.
		mutate {
			remove_field => [ "msg", "datetime" ]
		}
	}
	# Packet-filter (pf) firewall log lines.
	if [prog] =~ /^pf$/ {
		mutate {
			add_tag => [ "packetfilter" ]
		}
		# pf logs each event across two lines, the second indented with
		# whitespace; merge the continuation into the previous event so
		# the grok below can see both halves at once.
		multiline {
			pattern => "^\s+|^\t\s+"
			what => "previous"
		}
		mutate {
			# NOTE(review): msg/datetime were already removed in the
			# firewall block above — this removal is redundant but
			# harmless.
			remove_field => [ "msg", "datetime" ]
			remove_tag => [ "multiline" ]
		}
		# Extract rule id, pass/block action, interface, protocol, and
		# the source/destination IP:port pairs from the merged line.
		# NOTE(review): IPv4-only; see the comments below for an IPv6
		# variant.
		grok {
			match => [ "message", "rule (?<rule>.*)\(.*\): (?<action>pass|block) .* on (?<iface>.*): .* proto (?<proto>TCP|UDP|IGMP|ICMP) .*\n\s*(?<src_ip>(\d+\.\d+\.\d+\.\d+))\.?(?<src_port>(\d*)) [<|>] (?<dest_ip>(\d+\.\d+\.\d+\.\d+))\.?(?<dest_port>(\d*)):" ]
		}
	}
	# DHCP server log lines: pull out the action, client IP, MAC
	# address, and interface, depending on the DHCP message type.
	if [prog] =~ /^dhcpd$/ {
		if [message] =~ /^DHCPACK|^DHCPREQUEST|^DHCPOFFER/ {
			grok {
				match => [ "message", "(?<action>.*) (on|for|to) (?<src_ip>[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]) .*(?<mac_address>[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]).* via (?<iface>.*)" ]
			}
		}
		# DHCPDISCOVER carries no client IP yet — only a MAC address.
		if [message] =~ /^DHCPDISCOVER/ {
			grok {
				match => [ "message", "(?<action>.*) from (?<mac_address>[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]).* via (?<iface>.*)" ]
			}
		}
		if [message] =~ /^DHCPINFORM/ {
			grok {
				match => [ "message", "(?<action>.*) from (?<src_ip>.*).* via (?<iface>.*)" ]
			}
		}
	}
}

# Ship the parsed events to the Elasticsearch instance running on the
# same machine.
output {

  elasticsearch {
    host => "127.0.0.1"
    # Must match the cluster.name configured in elasticsearch.yml —
    # adjust if your cluster is named differently.
    cluster => "logcatcher"
  }
}

Edit: Per request, dashboard template.
Note: Had to add a .txt extension to get wordpress to accept the upload.
FirewallActivity-Dashboard

  8 Responses to “Monitoring pfsense with Logstash / Elasticsearch / Kibana”

  1. Cool post!

    Could you also attach “Firewall Activity” dashboard configuration from the screenshot? It looks awesome.

    Have a nice day!

  2. The dashboard isn’t all that fancy, but I attached it anyway. I’m still figuring out what info I really want to watch.

  3. Thanks for the useful config.

    I added a bit of code for the snort package log parsing in pfSense. This will follow the format you have outlined. As long as the option to send the snort logs to the system log in snort is enabled, this will work. One thing to note is that the prog values will actually be prog[pid] for some of the packages (cron and dhcp also) that are more aligned with syslog, and I use a grok filter to separate them. There is no need to turn on barnyard2 and ship the logs to logstash from there because the same information is shipped in both cases.

    if [prog] =~ /^snort/ {
    mutate {
    add_tag => [ “snort” ]
    }
    grok {
    match => [ “message”, “\[%{NONNEGINT:generatorID}:%{NONNEGINT:signatureID}:%{NONNEGINT:signatureID_rev}\] %{GREEDYDATA:description} \[Classification: %{GREEDYDATA:class}\] \[Priority: %{POSINT:priority}\] {%{WORD:proto}} %{IP:src_ip}:%{WORD:src_port} -> %{IP:dest_ip}:%{WORD:dest_port}” ]
    }
    grok {
    match => [ “prog”, “%{PROG:prog}\[%{POSINT:pid}\]” ]
    overwrite => [ “prog” ]
    }

    }

  4. you should update your logstash.conf to include ipv6 addresses. just a thought. 🙂

  5. I fixed the Parse for IPV6 Addresses as well. So far it looks good, and works with the dashboard

    grok {
    match => [ “message”, “rule (?.*)\(.*\): (?pass|block) .* on (?\S+) .* %{IP:src_ip}.(?(?:[+-]?(?:[0-9]+))) > %{IP:dest_ip}.(?(?:[+-]?(?:[0-9]+))).* (?TCP|UDP|IGMP|ICMP|igmp|icmp)” ]
    }


  6. input {
    tcp {
    type => syslog
    port => 514
    }
    udp {
    type => syslog
    port => 514
    }

    }

    filter {
    if [host] =~ /192\.168\.1\.1/ {
    grok {
    add_tag => [ "firewall" ]
    match => [ "message", "<(?.*)>(?(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]) (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:[0-5][0-9])) (?.*?): (?.*)" ]
    }
    mutate {
    gsub => ["datetime"," "," "]
    }
    date {
    match => [ "datetime", "MMM dd HH:mm:ss" ]
    }
    mutate {
    replace => [ "message", "%{msg}" ]
    }
    mutate {
    remove_field => [ "msg", "datetime" ]
    }
    }
    if [prog] =~ /^pf$/ {
    mutate {
    add_tag => [ "packetfilter" ]
    }
    multiline {
    pattern => "^\s+|^\t\s+"
    what => "previous"
    }
    mutate {
    remove_field => [ "msg", "datetime" ]
    remove_tag => [ "multiline" ]
    }
    grok {
    match => [
    "message", "rule (?.*)\(.*\): (?pass|block) .* on (?\S+) .* (?TCP|UDP|IGMP|ICMP|igmp|icmp) .* %{IP:src_ip}.(?(?:[+-]?(?:[0-9]+))) > %{IP:dest_ip}.(?(?:[+-]?(?:[0-9]+))).* ",
    "message", "rule (?.*)\(.*\): (?pass|block) .* on (?.*): .* proto (?TCP|UDP|IGMP|ICMP) .*\n\s*(?(\d+\.\d+\.\d+\.\d+))\.?(?(\d*)) [] (?(\d+\.\d+\.\d+\.\d+))\.?(?(\d*)):"
    ]
    }
    }
    if [prog] =~ /^dhcpd$/ {
    if [message] =~ /^DHCPACK|^DHCPREQUEST|^DHCPOFFER/ {
    grok {
    match => [ "message", "(?.*) (on|for|to) (?[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]) .*(?[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]).* via (?.*)" ]
    }
    }
    if [message] =~ /^DHCPDISCOVER/ {
    grok {
    match => [ "message", "(?.*) from (?[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]).* via (?.*)" ]
    }
    }
    if [message] =~ /^DHCPINFORM/ {
    grok {
    match => [ "message", "(?.*) from (?.*).* via (?.*)" ]
    }
    }
    }
    }

    output {

    elasticsearch {
    host => "127.0.0.1"
    # cluster => "logcatcher"
    }
    }
    </code>

  7. Any update on this for 1.4.3? Tried using it, but had some issues with multiline.

    Any ideas guys?

  8. Hi,
    By any chance, once the VM gets rebooted and I try to access the URL, I’m getting “upgrade required: your version of Elasticsearch is too old. Kibana requires Elasticsearch 0.90.9 or above”

    and another error:

    error could not reach http://192.168.3.199:80/_nodes. If you are using a proxy, ensure it is configured correctly.

    http://s27.postimg.org/jbu17dxj7/Clipboarder_2015_08_23_075.png
    http://s27.postimg.org/4h5ft7nyb/Clipboarder_2015_08_23_076.png

    Anyone getting this error?

    Thank you

 Leave a Reply

You may use these HTML tags and attributes: <a href="" title=""> <abbr title=""> <acronym title=""> <b> <blockquote cite=""> <cite> <code> <del datetime=""> <em> <i> <q cite=""> <s> <strike> <strong>

(required)

(required)