diff --git a/.gitignore b/.gitignore index dfc5d537f..4c9d4ebba 100644 --- a/.gitignore +++ b/.gitignore @@ -116,4 +116,8 @@ tests/unit/utils # vscode settings files .vscode - +# jekyll files +docs/_site/ +docs/.sass-cache/ +docs/.jekyll-metadata/ +docs/.jekyll-cache/ \ No newline at end of file diff --git a/ChangeLog b/ChangeLog index 067446006..2926523fd 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,14 @@ +2024-12-27 ipfixprobe-4.15.0 + * dpdk: Add DPDK telemetry + * dpdk: add DPDK port telemetry + * dpdk: fix RSS configuration + * ipfix-elements: fix NTP_USEC_TO_FRAC macro + * docs: revision of web page + * README: update telemetry section + +2024-11-14 ipfixprobe-4.14.0 + * dpdk: set mempool size as rx queue size + 2024-10-17 ipfixprobe-4.13.0 * usability: introduce docker/podman container to convert PCAP files to CSV * IPFIX: fix order of TCP options flags diff --git a/Makefile.am b/Makefile.am index 859f7eb5a..ba6c87120 100644 --- a/Makefile.am +++ b/Makefile.am @@ -70,6 +70,14 @@ ipfixprobe_storage_src=\ storage/fragmentationCache/fragmentationCache.cpp \ storage/cache.cpp \ storage/cache.hpp \ + storage/cacheOptParser.hpp \ + storage/cacheOptParser.cpp \ + storage/flowRecord.hpp \ + storage/flowRecord.cpp \ + storage/cttController.hpp \ + storage/cttController.cpp \ + storage/cacheRowSpan.hpp \ + storage/cacheRowSpan.cpp \ storage/xxhash.c \ storage/xxhash.h @@ -180,6 +188,11 @@ ipfixprobe_input_src+=\ input/dpdk/dpdkMbuf.cpp \ input/dpdk/dpdkDevice.hpp \ input/dpdk/dpdkDevice.cpp \ + input/dpdk/dpdkCompat.hpp \ + input/dpdk/dpdkPortTelemetry.hpp \ + input/dpdk/dpdkPortTelemetry.cpp \ + input/dpdk/dpdkTelemetry.hpp \ + input/dpdk/dpdkTelemetry.cpp \ input/dpdk.cpp \ input/dpdk.h \ input/dpdk-ring.cpp \ @@ -203,7 +216,8 @@ ipfixprobe_headers_src=\ include/ipfixprobe/ipfix-elements.hpp \ include/ipfixprobe/rtp.hpp \ include/ipfixprobe/telemetry-utils.hpp \ - include/ipfixprobe/parser-stats.hpp + include/ipfixprobe/parser-stats.hpp \ + 
include/ipfixprobe/cttmeta.hpp ipfixprobe_src=\ $(ipfixprobe_input_src) \ diff --git a/NEWS b/NEWS index b519d503e..c7b558a2e 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,23 @@ +2024-12-27 (Pavel Siska): dpdk: Add DPDK telemetry +2024-12-27 (Pavel Siska): dpdk: add DPDK port telemetry +2024-12-27 (Pavel Siska): dpdk: add compatible definition macro to dpdkCompact Add RTE_ETH_RSS_GENEVE macro +2024-12-27 (Pavel Siska): dpdk: fix RSS configuration +2024-12-27 (Pavel Siska): dpdk: Add dpdkCompact - compatibility definitions for DPDK versions +2024-12-16 (Pavel Siska): README - update build requirements +2024-12-16 (Pavel Siska): configure.ac - add checks for telemetry and appfs libraries - Added AC_CHECK_LIB for 'telemetry' and 'appfs' to verify library availability. - Ensured 'LIBS' includes '-lappFs -ltelemetry' for proper linking. - Updated RPM dependencies by appending 'telemetry' to RPM_REQUIRES and RPM_BUILDREQ. +2024-12-09 (Jan Sobol): ipfix-elements: fix NTP_USEC_TO_FRAC macro Use standard conversion to ntp timestamps as in https://tickelton.gitlab.io/articles/ntp-timestamps/. 
+2024-11-28 (Pavel Siska): Readme - update telemetry docs +2024-11-25 (Karel Hynek): docs: fix jekyll links +2024-11-25 (Karel Hynek): docs: fix jekyll base url and update docs description +2024-11-25 (Karel Hynek): docs: fix centering in the website footer +2024-11-20 (Tomas Cejka): docs: revision of web page +2024-11-15 (Karel Hynek): doc: FIX GitHub logo in footer +2024-11-15 (Karel Hynek): README: FIX ipfixprobe logo and link to documentation +2024-11-15 (Karel Hynek): Introduce doc using Jekyll & Update README (#232) + +2024-11-12 (Jan Sobol): dpdkDevice: use mempool size as rx queue size instead of burst size +2024-11-12 (Jan Sobol): dpdk: reduce value of DEFAULT_MBUF_POOL_SIZE to use it also as rx queue size + 2024-10-17 (Tomas Cejka): doc: include generated Doxyfile to create documentation 2024-10-17 (Tomas Cejka): doc: add doxygen comment for parse_packet() 2024-10-16 (Jan Sobol): dpdk-ring - fix checking if any packet has actually been parsed diff --git a/README.md b/README.md index cf8e914f6..d4b39f835 100644 --- a/README.md +++ b/README.md @@ -1,122 +1,21 @@ -# ipfixprobe - IPFIX flow exporter +

+ +

-## Description -This application creates biflows from packet input and exports them to output interface. +[![](https://img.shields.io/badge/license-BSD-blue.svg)](https://github.com/CESNET/ipfixprobe/blob/master/LICENSE) +![Coverity Scan](https://img.shields.io/coverity/scan/22112) +![GitHub top language](https://img.shields.io/github/languages/top/CESNET/ipfixprobe) -## Requirements -- libatomic -- kernel version at least 3.19 when using raw sockets input plugin enabled by default (disable with `--without-raw` parameter for `./configure`) -- [libpcap](http://www.tcpdump.org/) when compiling with pcap plugin (`--with-pcap` parameter) -- netcope-common [COMBO cards](https://www.liberouter.org/technologies/cards/) when compiling with ndp plugin (`--with-ndp` parameter) -- libunwind-devel when compiling with stack unwind on crash feature (`--with-unwind` parameter) -- [nemea](http://github.com/CESNET/Nemea-Framework) when compiling with unirec output plugin (`--with-nemea` parameter) -- cloned submodule with googletest framework to enabled optional tests (`--with-gtest` parameter) - -To compile DPDK interfaces, make sure you have DPDK libraries (and development files) installed and set the `PKG_CONFIG_PATH` environment variable if necessary. You can obtain the latest DPDK at http://core.dpdk.org/download/ Use `--with-dpdk` parameter of the `configure` script to enable it. - -## Build & Installation - -### Source codes - -This project uses a standard process of: - -``` -git clone --recurse-submodules https://github.com/CESNET/ipfixprobe -cd ipfixprobe -autoreconf -i -./configure -make -sudo make install -``` - -Check `./configure --help` for more details and settings. 
- -### RPM packages - -RPM package can be created in the following versions using `--with` parameter of `rpmbuild`: -- `--with pcap` enables RPM with pcap input plugin -- `--with ndp` enables RPM with netcope-common, i.e., ndp input plugin -- `--with nemea` enables RPM with unirec output plugin -- `--without raw` disables RPM with default raw socket input plugin -- `--with unwind` enables RPM with stack unwinding feature - -These parameters affect required dependencies of the RPM and build process. - -The default configuration of the RPM can be created using simply: `make rpm` - -Alternative versions (described in the following section) can be created by: -- NEMEA version of RPM: `make rpm-nemea` -- NDP version of RPM: `make rpm-ndp` - -We use [COPR infrastructure](https://copr.fedorainfracloud.org/coprs/g/CESNET/NEMEA/) to build and serve RPM packages for EPEL7 and EPEL8. -It is not possible to pass arguments to rpmbuild, so there is an option in configure to enforce NEMEA dependency: - -`./configure --enable-coprrpm && make srpm` +ipfixprobe is a high-performance flow exporter. It creates bidirectional flows from packet input and exports them to output interface. ipfixprobe supports a wide variety of flow extensions for application-layer protocol information. The flow extensions can be turned on with process plugins. We support TLS, QUIC, HTTP, DNS and many more. Just check our [documentation](https://cesnet.github.io/ipfixprobe/). 

-The output source RPM can be uploaded to copr. 
- -To install ipfixprobe with NEMEA dependency from binary RPM packages, it is possible to follow instructions on: -[https://copr.fedorainfracloud.org/coprs/g/CESNET/NEMEA/](https://copr.fedorainfracloud.org/coprs/g/CESNET/NEMEA/) - -### Windows 10 CygWin - -Install CygWin and the following packages: -- git -- pkg-config -- make -- automake -- autoconf -- libtool -- binutils -- gcc-core -- gcc-g++ -- libunwind-devel +## Installation +The RPM packages for RHEL based distributions can be downloaded from our [copr repository](https://copr.fedorainfracloud.org/coprs/g/CESNET/NEMEA/package/ipfixprobe/). Or just simply run: -Download npcap SDK [https://nmap.org/npcap/dist/npcap-sdk-1.07.zip](https://nmap.org/npcap/dist/npcap-sdk-1.07.zip) and copy content of the `Include` folder to `/usr/include` folder in your cygwin root installation folder (`C:\cygwin64\usr\include` for example). Then copy files of the `Lib` folder to `/lib` folder (or `Lib/x64/` based on your architecture). - -Download npcap library [https://nmap.org/npcap/dist/npcap-1.31.exe](https://nmap.org/npcap/dist/npcap-1.31.exe) and install. - -Add the following line to the `~/.bashrc` file ``` -export PATH="/cygdrive/c/Windows/system32/Npcap:$PATH" +dnf install -y dnf-plugins-core && dnf copr -y enable @CESNET/NEMEA +dnf install ipfixprobe ``` -Build project using commands in previous sections. Tested on cygwin version 2.908 - - -## Input / Output of the flow exporter - -Input and output interfaces are dependent on the configuration (by `configure`). -The default setting uses raw sockets input plugin and the output is in IPFIX format only. - -When the project is configured with `./configure --with-nemea`, the flow -exporter supports NEMEA output via TRAP IFC besides the default IPFIX output. -For more information about NEMEA, visit -[https://nemea.liberouter.org](https://nemea.liberouter.org). 
- -The flow exporter supports compilation with libpcap (`./configure --with-pcap`), which allows for receiving packets -from PCAP file or network interface card. - -When the project is configured with `./configure --with-ndp`, it is prepared for high-speed packet transfer -from special HW acceleration FPGA cards. For more information about the cards, -visit [COMBO cards](https://www.liberouter.org/technologies/cards/) or contact -us. - -### Output - -There are several currently available output plugins, such as: - -- `ipfix` standard IPFIX [RFC 5101](https://tools.ietf.org/html/rfc5101) -- `unirec` data source for the [NEMEA system](https://nemea.liberouter.org), the output is in the UniRec format sent via a configurable interface using [https://nemea.liberouter.org/trap-ifcspec/](https://nemea.liberouter.org/trap-ifcspec/) -- `text` output in human readable text format on standard output file descriptor (stdout) - -The output flow records are composed of information provided by the enabled plugins (using `-p` parameter, see [Flow Data Extension - Processing Plugins](./README.md#flow-data-extension---processing-plugins)). - -See `ipfixprobe -h output` for more information and complete list of output plugins and their parameters. - -LZ4 compression: -ipfix plugin supports LZ4 compression algorithm over tcp. See plugin's help for more information. - ## Parameters ### Module specific parameters - `-i ARGS` Activate input plugin (-h input for help) @@ -172,576 +71,197 @@ Here are the examples of various plugins usage: `./ipfixprobe -i 'dpdk-ring;r=rx_ipfixprobe_0;e= --proc-type=secondary' -i 'dpdk-ring;r=rx_ipfixprobe_1' -i 'dpdk-ring;r=rx_ipfixprobe_2' -i 'dpdk-ring;r=rx_ipfixprobe_3' -o 'text'` ``` -## Telemetry - -`ipfixprobe` can expose telemetry data using the appFs library, which leverages the fuse3 library (filesystem in userspace) to allow telemetry data to be accessed and manipulated -through standard filesystem operations. 
- -## Flow Data Extension - Processing Plugins - -`ipfixprobe` can be extended by new plugins for exporting various new information from flow. -There are already some existing plugins that export e.g. `DNS`, `HTTP`, `SIP`, `NTP`, `PassiveDNS`. +## Build -To enable a plugin, add `-p` option with argument (it can be used multiple times). Each plugin provides a set of information described in section Output data. - -See `ipfixprobe -h process` for more information and complete list of processing plugins and their parameters. - -## Adding new plugin - -To create new plugin use [process/create_plugin.sh](process/create_plugin.sh) script. This interactive script will generate .cpp and .h -file template and will also print `TODO` guide what needs to be done. +### Requirements +- libatomic +- [telemetry](https://github.com/CESNET/telemetry) (mandatory) — can be installed from the [COPR repository](https://copr.fedorainfracloud.org/coprs/g/CESNET/NEMEA-stable/package/telemetry/) or built from source code +- kernel version at least 3.19 when using raw sockets input plugin enabled by default (disable with `--without-raw` parameter for `./configure`) +- [libpcap](http://www.tcpdump.org/) when compiling with pcap plugin (`--with-pcap` parameter) +- netcope-common [COMBO cards](https://www.liberouter.org/technologies/cards/) when compiling with ndp plugin (`--with-ndp` parameter) +- libunwind-devel when compiling with stack unwind on crash feature (`--with-unwind` parameter) +- [nemea](http://github.com/CESNET/Nemea-Framework) when compiling with unirec output plugin (`--with-nemea` parameter) +- cloned submodule with googletest framework to enabled optional tests (`--with-gtest` parameter) -## Possible issues -### Flows are not send to output interface when reading small pcap file (NEMEA output) +To compile DPDK interfaces, make sure you have DPDK libraries (and development files) installed and set the `PKG_CONFIG_PATH` environment variable if necessary. 
You can obtain the latest DPDK at http://core.dpdk.org/download/ Use `--with-dpdk` parameter of the `configure` script to enable it. -Turn off message buffering using `buffer=off` option and set `timeout=WAIT` on output interfaces. +### Source codes -``` -./ipfixprobe -i 'pcap;file=traffic.pcap' -o 'unirec;i=u:out:timeout=WAIT:buffer=off' -``` +This project uses a standard process of: -## Output data - -The following sections describe set of information fields provided by the processing plugins. -The columns `Output field` and `Type` represent the name and type of UniRec elements (NEMEA output); however, the equivalent fields are exported in other output plugins as well --- e.g., in IPFIX format. - -Note: to lookup IPFIX enterprise id and element id have a look into [header file](https://github.com/CESNET/ipfixprobe/blob/master/include/ipfixprobe/ipfix-elements.hpp#L85) with the mapping to IPFIX elements. - -### Basic -Basic unirec fields exported on interface with basic (pseudo) plugin. These fields are also exported on interfaces where HTTP, DNS, SIP and NTP plugins are active. 
- -| Output field | Type | Description | -|:----------------------:|:----------------:|:---------------------------------------------------:| -| DST_MAC | macaddr | destination MAC address | -| SRC_MAC | macaddr | source MAC address | -| DST_IP | ipaddr | destination IP address | -| SRC_IP | ipaddr | source IP address | -| BYTES | uint64 | number of bytes in data flow (src to dst) | -| BYTES_REV | uint64 | number of bytes in data flow (dst to src) | -| LINK_BIT_FIELD or ODID | uint64 or uint32 | exporter identification | -| TIME_FIRST | time | first time stamp | -| TIME_LAST | time | last time stamp | -| PACKETS | uint32 | number of packets in data flow (src to dst) | -| PACKETS_REV | uint32 | number of packets in data flow (dst to src) | -| DST_PORT | uint16 | transport layer destination port | -| SRC_PORT | uint16 | transport layer source port | -| DIR_BIT_FIELD | uint8 | bit field for determining outgoing/incoming traffic | -| PROTOCOL | uint8 | transport protocol | -| TCP_FLAGS | uint8 | TCP protocol flags (src to dst) | -| TCP_FLAGS_REV | uint8 | TCP protocol flags (dst to src) | - -### Basic plus -List of unirec fields exported together with basic flow fields on interface by basicplus plugin. -Fields without `_REV` suffix are fields from source flow. Fields with `_REV` are from the opposite direction. 
- -| Output field | Type | Description | -|:------------:|:------:|:---------------------------:| -| IP_TTL | uint8 | IP TTL field | -| IP_TTL_REV | uint8 | IP TTL field | -| IP_FLG | uint8 | IP FLAGS | -| IP_FLG_REV | uint8 | IP FLAGS | -| TCP_WIN | uint16 | TCP window size | -| TCP_WIN_REV | uint16 | TCP window size | -| TCP_OPT | uint64 | TCP options bitfield | -| TCP_OPT_REV | uint64 | TCP options bitfield | -| TCP_MSS | uint32 | TCP maximum segment size | -| TCP_MSS_REV | uint32 | TCP maximum segment size | -| TCP_SYN_SIZE | uint16 | TCP SYN packet size | - -### NetTiSA -List of unirec fields exported together with NetTiSA flow fields on interface by nettisa plugin. - -| Output field | Type | Description | -|:------------:|:------:|:---------------------------:| -| NTS_MEAN | float | The mean of the payload lengths of packets | -| NTS_MIN | uint16 | Minimal value from all packet payload lengths | -| NTS_MAX | uint16 | Maximum value from all packet payload lengths | -| NTS_STDEV | float | Represents a switching ratio between different values of the sequence of observation. | -| NTS_KURTOSIS | float | The standard deviation is measure of the variation of data from the mean. | -| NTS_ROOT_MEAN_SQUARE | float | The measure of the magnitude of payload lengths of packets. | -| NTS_AVERAGE_DISPERSION | float | The average absolute difference between each payload length of packet and the mean value. | -| NTS_MEAN_SCALED_TIME | float | The kurtosis is the measure describing the extent to which the tails of a distribution differ from the tails of a normal distribution. | -| NTS_MEAN_DIFFTIMES | float | The scaled times is defined as sequence $\{st\} = \{ t_1 - t_1, t_2 - t_1, \dots, t_n - t_1 \}$. We compute the mean of the value with same method as for feature \textit{Mean}. | -| NTS_MIN_DIFFTIMES | float | The time differences is defined as sequence $ \{dt\} = \{ t_j - t_i \| j = i + 1, i \in \{1, 2, \dots, n - 1\}\}$. 
We compute the mean of the value with same method as for feature \textit{Mean}. | -| NTS_MAX_DIFFTIMES | float | Minimal value from all time differences, i.e., min space between packets. | -| NTS_TIME_DISTRIBUTION | float | Maximum value from all time differences, i.e., max space between packets. | -| NTS_SWITCHING_RATIO | float | Describes the distribution of time differences between individual packets. | - -### HTTP -List of unirec fields exported together with basic flow fields on interface by HTTP plugin. - -| Output field | Type | Description | -|:------------------------------:|:------:|:-----------------------------------------------------------:| -| HTTP_REQUEST_METHOD | string | HTTP request method | -| HTTP_REQUEST_HOST | string | HTTP request host | -| HTTP_REQUEST_URL | string | HTTP request url | -| HTTP_REQUEST_AGENT | string | HTTP request user agent | -| HTTP_REQUEST_REFERER | string | HTTP request referer | -| HTTP_RESPONSE_STATUS_CODE | uint16 | HTTP response code | -| HTTP_RESPONSE_CONTENT_TYPE | string | HTTP response content type | -| HTTP_RESPONSE_SERVER | string | HTTP response server | -| HTTP_RESPONSE_SET_COOKIE_NAMES | string | HTTP response all set-cookie names separated by a delimiter | - -### RTSP -List of unirec fields exported together with basic flow fields on interface by RTSP plugin. - -| Output field | Type | Description | -|:----------------------------:|:------:|:---------------------------:| -| RTSP_REQUEST_METHOD | string | RTSP request method name | -| RTSP_REQUEST_AGENT | string | RTSP request user agent | -| RTSP_REQUEST_URI | string | RTSP request URI | -| RTSP_RESPONSE_STATUS_CODE | uint16 | RTSP response status code | -| RTSP_RESPONSE_SERVER | string | RTSP response server field | -| RTSP_RESPONSE_CONTENT_TYPE | string | RTSP response content type | - -### TLS -List of unirec fields exported together with basic flow fields on interface by TLS plugin. 
- -| Output field | Type | Description | -|:------------:|:--------:|:------------------------------------------------------------:| -| TLS_SNI | string | TLS server name indication field from client | -| TLS_ALPN | string | TLS application protocol layer negotiation field from server | -| TLS_VERSION | uint16 | TLS client protocol version | -| TLS_JA3 | string | TLS client JA3 fingerprint | -| TLS_EXT_TYPE | uint16\* | TLS extensions in the TLS Client Hello | -| TLS_EXT_LEN | uint16\* | Length of each TLS extension | - -### DNS -List of unirec fields exported together with basic flow fields on interface by DNS plugin. - -| Output field | Type | Description | -|:------------:|:------:|:-------------------------------:| -| DNS_ID | uint16 | transaction ID | -| DNS_ANSWERS | uint16 | number of DNS answer records | -| DNS_RCODE | uint8 | response code field | -| DNS_NAME | string | question domain name | -| DNS_QTYPE | uint16 | question type field | -| DNS_CLASS | uint16 | class field of DNS question | -| DNS_RR_TTL | uint32 | resource record TTL field | -| DNS_RLENGTH | uint16 | length of DNS_RDATA | -| DNS_RDATA | bytes | resource record specific data | -| DNS_PSIZE | uint16 | requestor's payload size | -| DNS_DO | uint8 | DNSSEC OK bit | - -#### DNS_RDATA format - -DNS_RDATA formatting is implemented for some base DNS RR Types in human-readable output. -Same as [here](https://www.liberouter.org/technologies/exporter/dns-plugin/): - -| Record | Format | -|:------:|:------:| -| A | | -| AAAA | | -| NS | | -| CNAME | | -| PTR | | -| DNAME | | -| SOA | | -| SRV | | -| MX | | -| TXT | | -| MINFO | | -| HINFO | | -| ISDN | | -| DS | \* | -| RRSIG | \* | -| DNSKEY | \* | -| other | \* | - - \* binary data are skipped and not printed - -### PassiveDNS -List of unirec fields exported together with basic flow fields on interface by PassiveDNS plugin. 
- -| Output field | Type | Description | -|:------------:|:------:|:---------------------------------------:| -| DNS_ID | uint16 | transaction ID | -| DNS_ATYPE | uint8 | response record type | -| DNS_NAME | string | question domain name | -| DNS_RR_TTL | uint32 | resource record TTL field | -| DNS_IP | ipaddr | IP address from PTR, A or AAAA record | - - -### MQTT -List of unirec fields exported together with basic flow fields on interface by MQTT plugin. - -| Output field | Type | Description | -|:-----------------------------:|:------:|:-----------------------------------------------------:| -| MQTT_TYPE_CUMULATIVE | uint16 | types of packets and session present flag cumulative | -| MQTT_VERSION | uint8 | MQTT version | -| MQTT_CONNECTION_FLAGS | uint8 | last CONNECT packet flags | -| MQTT_KEEP_ALIVE | uint16 | last CONNECT keep alive | -| MQTT_CONNECTION_RETURN_CODE | uint8 | last CONNECT return code | -| MQTT_PUBLISH_FLAGS | uint8 | cumulative of PUBLISH packet flags | -| MQTT_TOPICS | string | topics from PUBLISH packets headers | - -### SIP -List of unirec fields exported together with basic flow fields on interface by SIP plugin. - -| Output field | Type | Description | -|:-----------------:|:------:|:-------------------------------:| -| SIP_MSG_TYPE | uint16 | SIP message code | -| SIP_STATUS_CODE | uint16 | status of the SIP request | -| SIP_CSEQ | string | CSeq field of SIP packet | -| SIP_CALLING_PARTY | string | calling party (from) URI | -| SIP_CALLED_PARTY | string | called party (to) URI | -| SIP_CALL_ID | string | call ID | -| SIP_USER_AGENT | string | user agent field of SIP packet | -| SIP_REQUEST_URI | string | SIP request URI | -| SIP_VIA | string | via field of SIP packet | - -### NTP -List of unirec fields exported together with basic flow fields on interface by NTP plugin. 
- -| Output field | Type | Description | -|:--------------:|:------:|:-------------------------:| -| NTP_LEAP | uint8 | NTP leap field | -| NTP_VERSION | uint8 | NTP message version | -| NTP_MODE | uint8 | NTP mode field | -| NTP_STRATUM | uint8 | NTP stratum field | -| NTP_POLL | uint8 | NTP poll interval | -| NTP_PRECISION | uint8 | NTP precision field | -| NTP_DELAY | uint32 | NTP root delay | -| NTP_DISPERSION | uint32 | NTP root dispersion | -| NTP_REF_ID | string | NTP reference ID | -| NTP_REF | string | NTP reference timestamp | -| NTP_ORIG | string | NTP origin timestamp | -| NTP_RECV | string | NTP receive timestamp | -| NTP_SENT | string | NTP transmit timestamp | - -### SMTP -List of unirec fields exported on interface by SMTP plugin - -| Output field | Type | Description | -|:-------------------------:|:------:|:-----------------------------------:| -| SMTP_2XX_STAT_CODE_COUNT | uint32 | number of 2XX status codes | -| SMTP_3XX_STAT_CODE_COUNT | uint32 | number of 3XX status codes | -| SMTP_4XX_STAT_CODE_COUNT | uint32 | number of 4XX status codes | -| SMTP_5XX_STAT_CODE_COUNT | uint32 | number of 5XX status codes | -| SMTP_COMMAND_FLAGS | uint32 | bit array of commands present | -| SMTP_MAIL_CMD_COUNT | uint32 | number of MAIL commands | -| SMTP_RCPT_CMD_COUNT | uint32 | number of RCPT commands | -| SMTP_STAT_CODE_FLAGS | uint32 | bit array of status codes present | -| SMTP_DOMAIN | string | domain name of the SMTP client | -| SMTP_FIRST_SENDER | string | first sender in MAIL command | -| SMTP_FIRST_RECIPIENT | string | first recipient in RCPT command | - -#### SMTP\_COMMAND\_FLAGS -The following table shows bit values of `SMTP\_COMMAND\_FLAGS` for each SMTP command present in communication. 
- -| Command | Value | -|:--------:|:------:| -| EHLO | 0x0001 | -| HELO | 0x0002 | -| MAIL | 0x0004 | -| RCPT | 0x0008 | -| DATA | 0x0010 | -| RSET | 0x0020 | -| VRFY | 0x0040 | -| EXPN | 0x0080 | -| HELP | 0x0100 | -| NOOP | 0x0200 | -| QUIT | 0x0400 | -| UNKNOWN | 0x8000 | - -#### SMTP\_STAT\_CODE\_FLAGS -The following table shows bit values of `SMTP\_STAT_CODE\_FLAGS` for each present in communication. - -| Status code | Value | -|:-----------:|:----------:| -| 211 | 0x00000001 | -| 214 | 0x00000002 | -| 220 | 0x00000004 | -| 221 | 0x00000008 | -| 250 | 0x00000010 | -| 251 | 0x00000020 | -| 252 | 0x00000040 | -| 354 | 0x00000080 | -| 421 | 0x00000100 | -| 450 | 0x00000200 | -| 451 | 0x00000400 | -| 452 | 0x00000800 | -| 455 | 0x00001000 | -| 500 | 0x00002000 | -| 501 | 0x00004000 | -| 502 | 0x00008000 | -| 503 | 0x00010000 | -| 504 | 0x00020000 | -| 550 | 0x00040000 | -| 551 | 0x00080000 | -| 552 | 0x00100000 | -| 553 | 0x00200000 | -| 554 | 0x00400000 | -| 555 | 0x00800000 | -| * | 0x40000000 | -| UNKNOWN | 0x80000000 | - -* Bit is set if answer contains SPAM keyword. - -### PSTATS -List of unirec fields exported on interface by PSTATS plugin. The plugin is compiled to gather statistics for the first `PSTATS_MAXELEMCOUNT` (30 by default) packets in the biflow record. -Note: the following fields are UniRec arrays (or basicList in IPFIX). - -| Output field | Type | Description | -|:--------------------------:|:--------:|:--------------------------------------:| -| PPI_PKT_LENGTHS | uint16\* | sizes of the first packets | -| PPI_PKT_TIMES | time\* | timestamps of the first packets | -| PPI_PKT_DIRECTIONS | int8\* | directions of the first packets | -| PPI_PKT_FLAGS | uint8\* | TCP flags for each packet | - -#### Plugin parameters: -- includezeros - Include zero-length packets in the lists. -- skipdup - Skip retransmitted (duplicated) TCP packets. 
- -##### Example: ``` -ipfixprobe 'pcap;file=pcaps/http.pcap' -p "pstats;includezeros" -o 'unirec;i=u:stats:timeout=WAIT;p=stats'" +git clone --recurse-submodules https://github.com/CESNET/ipfixprobe +cd ipfixprobe +autoreconf -i +./configure +make +sudo make install ``` -### OSQUERY -List of unirec fields exported together with basic flow fields on interface by OSQUERY plugin. - -| Output field | Type | Description | -|:--------------------------:|:--------:|:---------------------------------------------------:| -| PROGRAM_NAME | string | The name of the program that handles the connection | -| USERNAME | string | The name of the user who starts the process | -| OS_NAME | string | Distribution or product name | -| OS_MAJOR | uint16 | Major release version | -| OS_MINOR | uint16 | Minor release version | -| OS_BUILD | string | Optional build-specific or variant string | -| OS_PLATFORM | string | OS Platform or ID | -| OS_PLATFORM_LIKE | string | Closely related platforms | -| OS_ARCH | string | OS Architecture | -| KERNEL_VERSION | string | Kernel version | -| SYSTEM_HOSTNAME | string | Network hostname including domain | - -### SSDP -List of unirec fields exported together with basic flow fields on interface by SSDP plugin. - -| Output field | Type | Description | -|:------------------:|:------:|:-------------------------------:| -| SSDP_LOCATION_PORT | uint16 | service port | -| SSDP_NT | string | list of advertised service urns | -| SSDP_SERVER | string | server info | -| SSDP_ST | string | list of queried service urns | -| SSDP_USER_AGENT | string | list of user agents | - -All lists are semicolon separated. - -### DNS-SD -List of unirec fields exported together with basic flow fields on interface by DNS-SD plugin. 
- -| Output field | Type | Description | -|:---------------:|:------:|:-------------------------------:| -| DNSSD_QUERIES | string | list of queries for services | -| DNSSD_RESPONSES | string | list of advertised services | - -Format of DNSSD_QUERIES: [service_instance_name;][...] - -Format of DNSSD_RESPONSES: [service_instance_name;service_port;service_target;hinfo;txt;][...] - -#### Plugin parameters: -- txt - Activates processing of txt records. - - Allows to pass a filepath to .csv file with whitelist filter of txt records. - - File line format: service.domain,txt_key1,txt_key2,... - - If no filepath is provided, all txt records will be aggregated. - -### OVPN (OpenVPN) - -List of fields exported together with basic flow fields on interface by OVPN plugin. - -| Output field | Type | Description | -|:------------------:|:------:|:-------------------------------:| -| OVPN_CONF_LEVEL | uint8 | level of confidence that the flow record is an OpenVPN tunnel | - - -### IDPContent (Initial Data Packets Content) +Check `./configure --help` for more details and settings. -List of fields exported together with basic flow fields on the interface by IDPContent plugin. -The plugin is compiled to export `IDPCONTENT_SIZE` (100 by default) bytes from the first data packet in SRC -> DST direction, -and the first data packet in DST -> SRC direction. 
+### RPM packages -| Output field | Type | Description | -|:------------------:|:------:|:-------------------------------:| -| IDP_CONTENT | bytes | Content of first data packet from SRC -> DST| -| IDP_CONTENT_REV | bytes | Content of first data packet from DST -> SRC| +RPM package can be created in the following versions using `--with` parameter of `rpmbuild`: +- `--with pcap` enables RPM with pcap input plugin +- `--with ndp` enables RPM with netcope-common, i.e., ndp input plugin +- `--with nemea` enables RPM with unirec output plugin +- `--without raw` disables RPM with default raw socket input plugin +- `--with unwind` enables RPM with stack unwinding feature -### NetBIOS +These parameters affect required dependencies of the RPM and build process. -List of fields exported together with basic flow fields on interface by NetBIOS plugin. +The default configuration of the RPM can be created using simply: `make rpm` -| Output field | Type | Description | -|:-------------:|:------:|:---------------------------:| -| NB_NAME | string | NetBIOS Name Service name | -| NB_SUFFIX | uint8 | NetBIOS Name Service suffix | +Alternative versions (described in the following section) can be created by: +- NEMEA version of RPM: `make rpm-nemea` +- NDP version of RPM: `make rpm-ndp` -### PHISTS +We use [COPR infrastructure](https://copr.fedorainfracloud.org/coprs/g/CESNET/NEMEA/) to build and serve RPM packages for EPEL9. +It is not possible to pass arguments to rpmbuild, so there is an option in configure to enforce NEMEA dependency: -List of fields exported together with basic flow fields on the interface by PHISTS plugin. -The plugin exports the histograms of Payload sizes and Inter-Packet-Times for each direction. 
The -histograms bins are scaled logarithmicaly and are shown in following table: +`./configure --enable-coprrpm && make srpm` -| Bin Number | Size Len | Inter Packet Time | -|:----------:|:----------:|:-----------------:| -| 1 | 0-15 B | 0-15 ms | -| 2 | 16-31 B | 16-31 ms | -| 3 | 32-63 B | 32-63 ms | -| 4 | 64-127 B | 64-127 ms | -| 5 | 128-255 B | 128-255 ms | -| 6 | 256-511 B | 256-511 ms | -| 7 | 512-1023 B | 512-1023 ms | -| 8 | > 1024 B | > 1024 ms | +The output source RPM can be uploaded to copr. -The exported unirec fields and IPFIX basiclists is shown in following table: +To install ipfixprobe with NEMEA dependency from binary RPM packages, it is possible to follow instructions on: +[https://copr.fedorainfracloud.org/coprs/g/CESNET/NEMEA/](https://copr.fedorainfracloud.org/coprs/g/CESNET/NEMEA/) -| Output field | Type | Description | -|:-------------------:|:-------:|:---------------------------------------:| -| D_PHISTS_IPT | uint32\*| DST->SRC: Histogram of interpacket times| -| D_PHISTS_SIZES | uint32\*| DST->SRC: Histogram of packet sizes | -| S_PHISTS_IPT | uint32\*| SRC->DST: Histogram of interpacket times| -| S_PHISTS_SIZES | uint32\*| SRC->DST: Histogram of packet sizes | +## Telemetry -#### Plugin parameters: -- includezeros - Include zero-length packets in the lists. +`ipfixprobe` exports statistics and other diagnostic information through a telemetry interface based on appFs library, which leverages the fuse3 library (filesystem in userspace) to allow telemetry data to be accessed and manipulated +through standard filesystem operations. -##### Example: ``` -ipfixprobe 'pcap;file=pcaps/http.pcap' -p "phists;includezeros" -o 'unirec;i=u:hists:timeout=WAIT;p=phists'" +/var/run/ipfixprobe/ +├── output +│   └── ipxRing +│   └── stats +└── pipeline + └── queues + ├── 0 + │   ├── cache-stats + │   ├── fragmentation-cache-stats + │   ├── input-stats + │   └── parser-stats + ├── 1 + ... 
``` -### BSTATS - -List of fields exported together with basic flow fields on the interface by BSTATS plugin. -The plugin is compiled to export the first `BSTATS_MAXELENCOUNT` (15 by default) burst in each direction. -The bursts are computed separately for each direction. Burst is defined by `MINIMAL_PACKETS_IN_BURST` (3 by default) and by `MAXIMAL_INTERPKT_TIME` (1000 ms by default) between packets to be included in a burst. When the flow contains less then `MINIMAL_PACKETS_IN_BURST` packets, the fields are not exported to reduce output bandwidth. - -| Output field | Type | Description | -|:-------------------:|:-------:|:---------------------------------------------------------------:| -| SBI_BRST_PACKETS | uint32\* | SRC->DST: Number of packets transmitted in ith burst| -| SBI_BRST_BYTES | uint32\* | SRC->DST: Number of bytes transmitted in ith burst | -| SBI_BRST_TIME_START | time\* | SRC->DST: Start time of the ith burst | -| SBI_BRST_TIME_STOP | time\* | SRC->DST: End time of the ith burst | -| DBI_BRST_PACKETS | uint32\* | DST->SRC: Number of packets transmitted in ith burst| -| DBI_BRST_BYTES | uint32\* | DST->SRC: Number of bytes transmitted in ith burst | -| DBI_BRST_TIME_START | time\* | DST->SRC: Start time of the ith burst | -| DBI_BRST_TIME_STOP | time\* | DST->SRC: End time of the ith burst | - -### WG (WireGuard) - -List of fields exported together with basic flow fields on interface by WG plugin. - -| Output field | Type | Description | -|:------------------:|:------:|:-------------------------------:| -| WG_CONF_LEVEL | uint8 | level of confidence that the flow record is a WireGuard tunnel| -| WG_SRC_PEER | uint32 | ephemeral SRC peer identifier | -| WG_DST_PEER | uint32 | ephemeral DST peer identifier | -### QUIC +From telemetry stats you can read: -List of fields exported together with basic flow fields on interface by quic plugin. -`-with-quic-ch-full-tls-ext` enables extraction of all TLS extensions in the Client Hello. 
+**Output Directory:** -| Output field | Type | Description | -|:-------------------:|:--------:|:---------------------------------------------------------------------------------------------:| -| QUIC_SNI | string | Decrypted server name | -| QUIC_USER_AGENT | string | Decrypted user agent | -| QUIC_VERSION | uint32 | QUIC version from first server long header packets | -| QUIC_CLIENT_VERSION | uint32 | QUIC version from first client long header packet | -| QUIC_TOKEN_LENGTH | uint64 | Token length from Initial and Retry packets | -| QUIC_OCCID | bytes | Source Connection ID from first client packet | -| QUIC_OSCID | bytes | Destination Connection ID from first client packet | -| QUIC_SCID | bytes | Source Connection ID from first server packet | -| QUIC_RETRY_SCID | bytes | Source Connection ID from Retry packet | -| QUIC_MULTIPLEXED | uint8 | > 0 if multiplexed (at least two different QUIC_OSCIDs or SNIs) | -| QUIC_ZERO_RTT | uint8 | Number of 0-RTT packets in flow. | -| QUIC_SERVER_PORT | uint16 | TODO Server Port determined by packet type and TLS message | -| QUIC_PACKETS | uint8\* | QUIC long header packet type (v1 encoded), version negotiation, QUIC bit | -| QUIC_CH_PARSED | uint8 | >0 if TLS Client Hello parsed without errors | -| QUIC_TLS_EXT_TYPE | uint16\* | TLS extensions in the TLS Client Hello | -| QUIC_TLS_EXT_LEN | uint16\* | Length of each TLS extension | -| QUIC_TLS_EXT | string | Payload of all/application_layer_protocol_negotiation and quic_transport params TLS extension | +In the output directory, you can monitor the capacity and current usage of the ipxRing. This helps determine whether the output plugin can export flows quickly enough or if there are bottlenecks caused by insufficient ring capacity. -### ICMP +***Example: ipxRing/stats*** +``` +count: 204 +size: 16536 +usage: 1.23 (%) +``` -List of fields exported together with basic flow fields on interface by icmp plugin. 
+**Pipeline Directory:** + +The pipeline directory provides statistics for all worker queues. Each queue is identified by its ID (e.g., 0, 1) and includes the following files: +- cache-stats: Provides detailed metrics about flow cache usage and exported flow statistics. + + ***Example:*** + + ``` + FlowCacheUsage: 3.81 (%) + FlowEndReason:ActiveTimeout: 34666654 + FlowEndReason:Collision: 4272143 + FlowEndReason:EndOfFlow: 486129363 + FlowEndReason:Forced: 58905 + FlowEndReason:InactiveTimeout: 2169352600 + FlowRecordStats:11-20packets: 178735501 + FlowRecordStats:1packet: 1824500140 + FlowRecordStats:2-5packets: 376268956 + FlowRecordStats:21-50packets: 87971544 + FlowRecordStats:51-plusPackets: 55424342 + FlowRecordStats:6-10packets: 171579322 + FlowsInCache: 39986 + TotalExportedFlows: 2694479805 + ``` + +- fragmentation-cache-stats: Provides metrics related to packet fragmentation. + + ***Example:*** + + ``` + firstFragments: 163634416 + fragmentedPackets: 395736897 + fragmentedTraffic: 0.13 (%) + notFoundFragments: 85585913 + totalPackets: 314829930486 + ``` + +- input-stats: Provides metrics on the data received by the queue. + + ***Example:*** + ``` + received_bytes: 388582006601530 + received_packets: 314788702409 + ``` + + +- parser-stats: Provides detailed information about the types of packets processed by the parser. + + ***Example:*** + ``` + ipv4_packets: 193213761481 + ipv6_packets: 121566104060 + mpls_packets: 0 + pppoe_packets: 0 + seen_packets: 314791928764 + tcp_packets: 301552123188 + trill_packets: 0 + udp_packets: 12783568334 + unknown_packets: 11601117 + vlan_packets: 31477986554 + ``` -| Output field | Type | Description | -|:------------------:|:------:|:-------------------------------:| -| L4_ICMP_TYPE_CODE | uint16 | ICMP type (MSB) and code (LSB) | -### SSADetector +## Input / Output of the flow exporter -List of fields exported together with basic flow fields on interface by ssadetector plugin.
-The detector search for the SYN SYN-ACK ACK pattern in packet lengths. Multiple occurrences of this pattern suggest a tunneled connection. +The availability of the input and output interfaces depends on the ipfixprobe build settings. By default, we provide RPM package with pcap and raw inputs. The default provided outputs are ipfix and text. -| Output field | Type | Description | -|:------------------:|:------:|:---------------------------------------:| -| SSA_CONF_LEVEL | uint8 | 1 if SSA sequence detected, 0 otherwise | +When the project is configured with `./configure --with-nemea`, the flow +exporter supports NEMEA output via TRAP IFC besides the default IPFIX output. +For more information about NEMEA, visit +[https://nemea.liberouter.org](https://nemea.liberouter.org). -### VLAN +The flow exporter supports compilation with libpcap (`./configure --with-pcap`), which allows for receiving packets +from PCAP file or network interface card. -List of fields exported together with basic flow fields on the interface by VLAN plugin. +When the project is configured with `./configure --with-ndp`, it is prepared for high-speed packet transfer +from special HW acceleration FPGA cards. For more information about the cards, +visit [COMBO cards](https://www.liberouter.org/technologies/cards/) or contact +us. -| Output field | Type | Description | -|:------------:|:------:|:--------------------------:| -| VLAN_ID | uint16 | Vlan ID (used in flow key) | +### Output -### Flow Hash +There are several currently available output plugins, such as: -List of fields exported together with basic flow fields on interface by flow_hash plugin.
+- `ipfix` standard IPFIX [RFC 5101](https://tools.ietf.org/html/rfc5101) +- `unirec` data source for the [NEMEA system](https://nemea.liberouter.org), the output is in the UniRec format sent via a configurable interface using [https://nemea.liberouter.org/trap-ifcspec/](https://nemea.liberouter.org/trap-ifcspec/) +- `text` output in human readable text format on standard output file descriptor (stdout) -| Output field | Type | Description | -|:------------------:|:------:|:---------------------------------:| -| FLOW_ID | uint64 | Hash of the flow - unique flow id | +The output flow records are composed of information provided by the enabled plugins (using `-p` parameter, see [Flow Data Extension - Processing Plugins](./README.md#flow-data-extension---processing-plugins)). -### MPLS +See `ipfixprobe -h output` for more information and complete list of output plugins and their parameters. -List of fields exported together with basic flow fields on interface by mpls plugin. +LZ4 compression: +ipfix plugin supports LZ4 compression algorithm over tcp. See plugin's help for more information. -| Output field | Type | Description | -|:----------------------------:|:-----:|:------------------------------------------------:| -| MPLS_TOP_LABEL_STACK_SECTION | bytes | MPLS label section (without TTL), always 3 bytes | -## Simplified function diagram -Diagram below shows how `ipfixprobe` works. +## Possible issues +### Flows are not sent to output interface when reading small pcap file (NEMEA output) -1. `Packet` is read from pcap file or network interface -2. `Packet` is processed by PcapReader and is about to put to flow cache -3. Flow cache create or update flow and call `pre_create`, `post_create`, `pre_update`, `post_update` and `pre_export` functions for each active plugin at appropriate time -4. `Flow` is put into exporter when considered as expired, flow cache is full or is forced to by a plugin -5. 
Exporter fills `unirec record`, which is then send it to output libtrap interface +Turn off message buffering using `buffer=off` option and set `timeout=WAIT` on output interfaces. ``` - +--------------------------------+ - | pcap file or network interface | - +-----+--------------------------+ - | - 1. | - | +-----+ - +--------v---------+ | - | | +-----------+ | - | PcapReader | +------> Plugin1 | | - | | | +-----------+ | - +--------+---------+ | | - | | +-----------+ | - 2. | +------> Plugin2 | | - | | +-----------+ | - +--------v---------+ | | - | | 3. | +-----------+ +----+ active plugins - | NHTFlowCache +-------------> Plugin3 | | - | | | +-----------+ | - +--------+---------+ | | - | | . | - 4. | | . | - | | . | - +--------v---------+ | | - | | | +-----------+ | - | UnirecExporter | +------> PluginN | | - | | +-----------+ | - +--------+---------+ | - | +-----+ - 5. | - | - +-----v--------------------------+ - | libtrap output interface | - +--------------------------------+ +./ipfixprobe -i 'pcap;file=traffic.pcap' -o 'unirec;i=u:out:timeout=WAIT:buffer=off' ``` + diff --git a/configure.ac b/configure.ac index c8f2ee0e0..de9683138 100644 --- a/configure.ac +++ b/configure.ac @@ -2,7 +2,7 @@ # Process this file with autoconf to produce a configure script. AC_PREREQ([2.69]) -AC_INIT([ipfixprobe], [4.13.0], [nemea@cesnet.cz]) +AC_INIT([ipfixprobe], [4.15.0], [nemea@cesnet.cz]) AC_CONFIG_SRCDIR([main.cpp]) AC_CONFIG_HEADERS([config.h]) @@ -226,6 +226,32 @@ if [[ -z "$WITH_NDP_TRUE" ]]; then RPM_BUILDREQ+=" netcope-common-devel" fi +AC_ARG_WITH([ctt], + AC_HELP_STRING([--with-ctt],[Compile ipfixprobe with ctt plugin for using Connection Tracking Table]), + [ + if test "$withval" = "yes"; then + withctt="yes" + else + withctt="no" + fi + ], [withctt="no"] +) + +if test x${withctt} = xyes; then + AC_LANG_PUSH([C++]) + CXXFLAGS="$CXXFLAGS -std=c++17" + AC_CHECK_HEADERS([ctt.hpp], [libctt=yes], AC_MSG_ERROR([ctt.hpp not found. 
Try installing libctt-devel])) + AC_LANG_POP([C++]) +fi + +AM_CONDITIONAL(WITH_CTT, test x${libctt} = xyes && test x${withctt} = xyes) +if [[ -z "$WITH_CTT_TRUE" ]]; then + AC_DEFINE([WITH_CTT], [1], [Define to 1 if the ctt is available]) + LIBS="-lctt $LIBS" + RPM_REQUIRES+=" libctt" + RPM_BUILDREQ+=" libctt-devel" +fi + AC_ARG_WITH([pcap], AC_HELP_STRING([--with-pcap],[Compile ipfixprobe with pcap plugin for capturing using libpcap library]), [ @@ -382,6 +408,20 @@ AC_ARG_WITH([stem], [withstem="no"] ) +# Check if the telemetry library is available +AC_CHECK_LIB([telemetry], [main], + [AC_MSG_RESULT([Found telemetry library.])], + [AC_MSG_ERROR([The telemetry library is required but was not found. Try to install telemetry.])]) + +# Check if the appfs library is available +AC_CHECK_LIB([appFs], [main], + [AC_MSG_RESULT([Found appfs library.])], + [AC_MSG_ERROR([The appfs library is required but was not found. Try to install telemetry])]) + +LIBS="-lappFs -ltelemetry $LIBS" +RPM_REQUIRES+=" telemetry" +RPM_BUILDREQ+=" telemetry" + AM_CONDITIONAL(WITH_STEM, test x${withstem} = xyes) if [[ -z "$WITH_STEM_TRUE" ]]; then AC_DEFINE([WITH_STEM], [1], [Define to 1 to use flexprobe testing interface]) diff --git a/docs/404.html b/docs/404.html new file mode 100644 index 000000000..1590ef26d --- /dev/null +++ b/docs/404.html @@ -0,0 +1,6 @@ +--- +title: Not Found +description: This does not exist +permalink: /404.html +sitemap: false +--- diff --git a/docs/Gemfile b/docs/Gemfile new file mode 100644 index 000000000..984db5eef --- /dev/null +++ b/docs/Gemfile @@ -0,0 +1,11 @@ +source 'https://rubygems.org' +gem "webrick" +gem 'jekyll', '~> 4.2.0' + +group :jekyll_plugins do + gem 'jekyll-archives', '~> 2.2.1' + gem 'jekyll-feed', '~> 0.15.1' + gem 'jekyll-paginate', '~> 1.1.0' + gem 'jekyll-seo-tag', '~> 2.7.1' + gem 'jekyll-sitemap', '~> 1.4.0' +end diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock new file mode 100644 index 000000000..444faada7 --- /dev/null +++ 
b/docs/Gemfile.lock @@ -0,0 +1,83 @@ +GEM + remote: https://rubygems.org/ + specs: + addressable (2.7.0) + public_suffix (>= 2.0.2, < 5.0) + colorator (1.1.0) + concurrent-ruby (1.1.9) + em-websocket (0.5.2) + eventmachine (>= 0.12.9) + http_parser.rb (~> 0.6.0) + eventmachine (1.2.7) + ffi (1.15.1) + forwardable-extended (2.6.0) + http_parser.rb (0.6.0) + i18n (1.8.10) + concurrent-ruby (~> 1.0) + jekyll (4.2.0) + addressable (~> 2.4) + colorator (~> 1.0) + em-websocket (~> 0.5) + i18n (~> 1.0) + jekyll-sass-converter (~> 2.0) + jekyll-watch (~> 2.0) + kramdown (~> 2.3) + kramdown-parser-gfm (~> 1.0) + liquid (~> 4.0) + mercenary (~> 0.4.0) + pathutil (~> 0.9) + rouge (~> 3.0) + safe_yaml (~> 1.0) + terminal-table (~> 2.0) + jekyll-archives (2.2.1) + jekyll (>= 3.6, < 5.0) + jekyll-feed (0.15.1) + jekyll (>= 3.7, < 5.0) + jekyll-paginate (1.1.0) + jekyll-sass-converter (2.1.0) + sassc (> 2.0.1, < 3.0) + jekyll-seo-tag (2.7.1) + jekyll (>= 3.8, < 5.0) + jekyll-sitemap (1.4.0) + jekyll (>= 3.7, < 5.0) + jekyll-watch (2.2.1) + listen (~> 3.0) + kramdown (2.3.1) + rexml + kramdown-parser-gfm (1.1.0) + kramdown (~> 2.0) + liquid (4.0.3) + listen (3.5.1) + rb-fsevent (~> 0.10, >= 0.10.3) + rb-inotify (~> 0.9, >= 0.9.10) + mercenary (0.4.0) + pathutil (0.16.2) + forwardable-extended (~> 2.6) + public_suffix (4.0.6) + rb-fsevent (0.11.0) + rb-inotify (0.10.1) + ffi (~> 1.0) + rexml (3.2.5) + rouge (3.26.0) + safe_yaml (1.0.5) + sassc (2.4.0) + ffi (~> 1.9) + terminal-table (2.0.0) + unicode-display_width (~> 1.1, >= 1.1.1) + unicode-display_width (1.7.0) + webrick (1.8.1) + +PLATFORMS + ruby + +DEPENDENCIES + jekyll (~> 4.2.0) + jekyll-archives (~> 2.2.1) + jekyll-feed (~> 0.15.1) + jekyll-paginate (~> 1.1.0) + jekyll-seo-tag (~> 2.7.1) + jekyll-sitemap (~> 1.4.0) + webrick + +BUNDLED WITH + 1.17.3 diff --git a/docs/LICENSE b/docs/LICENSE new file mode 100644 index 000000000..a4de04d2b --- /dev/null +++ b/docs/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 
2016-2020 CloudCannon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..021470727 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,71 @@ +# ipfixprobe website Hydra + +Based on Hydra opensource template [live demo](https://proud-alligator.cloudvent.net/) + +## Features + +* Contact form +* Pre-built pages +* Pre-styled components +* Blog with pagination +* Post category pages +* Disqus comments for posts +* Staff and author system +* Configurable footer +* Optimised for editing in [CloudCannon](http://cloudcannon.com/) +* RSS/Atom feed +* SEO tags +* Google Analytics + +## Setup + +1. Add your site and author details in `_config.yml`. +2. Add your Google Analytics and Disqus keys to `_config.yml`. +3. Get a workflow going to see your site's output (with [CloudCannon](https://app.cloudcannon.com/) or Jekyll locally). 
+ +## Develop + +Hydra was built with [Jekyll](http://jekyllrb.com/) version 3.3.1, but should support newer versions as well. + +Install the dependencies with [Bundler](http://bundler.io/): + +~~~bash +$ bundle install +~~~ + +Run `jekyll` commands through Bundler to ensure you're using the right versions: + +~~~bash +$ bundle exec jekyll serve +~~~ + +## Editing + +Hydra is already optimised for adding, updating and removing pages, staff, advice, company details and footer elements in CloudCannon. + +### Posts + +* Add, update or remove a post in the *Posts* collection. +* The **Staff Author** field links to members in the **Staff** collection. +* Documentation pages are organised in the navigation by category, with URLs based on the path inside the `_docs` folder. +* Change the defaults when new posts are created in `_posts/_defaults.md`. + +### Contact Form + +* Preconfigured to work with CloudCannon, but easily changed to another provider (e.g. [FormSpree](https://formspree.io/)). +* Sends email to the address listed in company details. + +### Staff + +* Reused around the site to save multiple editing locations. +* Add `excluded_in_search: true` to any documentation page's front matter to exclude that page in the search results. + +### Navigation + +* Exposed as a data file to give clients better access. +* Set in the *Data* / *Navigation* section. + +### Footer + +* Exposed as a data file to give clients better access. +* Set in the *Data* / *Footer* section. 
diff --git a/docs/_config.yml b/docs/_config.yml new file mode 100644 index 000000000..b9b831c87 --- /dev/null +++ b/docs/_config.yml @@ -0,0 +1,109 @@ +# ---- +# Site + +title: ipfixprobe - High-performance IPFIX probe +url: "https://cesnet.github.io/ipfixprobe/" +baseurl: https://cesnet.github.io/ipfixprobe/ # comment out for local development +#baseurl: # uncomment for local development +google_analytics_key: +google_maps_javascript_api_key: +disqus_shortname: + +# Values for the jekyll-seo-tag gem (https://github.com/jekyll/jekyll-seo-tag) +logo: /siteicon.svg +description: "ipfixprobe is a tool for creating network flows and exporting them to a remote collector using the IPFIX protocol. It is an essential tool for maintaining network security." +author: + name: "Karel Hynek (CESNET z.s.p.o.)" + email: "hynekkar@cesnet.cz" + twitter: # twitter username without the @ symbol +social: + name: "Hydra Template" + links: + - https://github.com/CloudCannon/hydra-jekyll-template + +# ----- +# Build + +timezone: Etc/UTC + +collections: + staff_members: + _hide_content: true + get_options: + _hide_content: true + how: + _hide_content: true + output: true + export: + _hide_content: true + +paginate: 10 +paginate_path: "/blog/:num/" +permalink: pretty + +defaults: + - scope: + path: "" + type: "posts" + values: + layout: "post" + _options: + content: + width: 1500 + height: 2500 + - scope: + path: "" + type: "staff_members" + values: + _options: + image_path: + width: 600 + height: 600 + - scope: + path: "" + values: + layout: "page" + - scope: + path: "index.html" + values: + layout: "default" + - scope: + path: "get_options" + values: + layout: "post" + - scope: + path: "contact.html" + values: + full_width: true + +jekyll-archives: + enabled: + - categories + +plugins: + - jekyll-archives + - jekyll-sitemap + - jekyll-seo-tag + - jekyll-feed + - jekyll-paginate + +exclude: + - Gemfile + - Gemfile.lock + - README.md + - LICENSE + +# ----------- +# CloudCannon + 
+_select_data: + social_icons: + - Facebook + - Instagram + - LinkedIn + - Pinterest + - Tumblr + - Twitter + - YouTube + - RSS + diff --git a/docs/_data/footer.yml b/docs/_data/footer.yml new file mode 100644 index 000000000..6c20371af --- /dev/null +++ b/docs/_data/footer.yml @@ -0,0 +1,5 @@ +- links: + - name: GitHub + link: https://github.com/CESNET/ipfixprobe + new_window: true + social_icon: GitHub \ No newline at end of file diff --git a/docs/_data/navigation.yml b/docs/_data/navigation.yml new file mode 100644 index 000000000..3535d2c68 --- /dev/null +++ b/docs/_data/navigation.yml @@ -0,0 +1,16 @@ +- name: "Installation" + link: /get_options/ + new_window: false + highlight: false +- name: "How to use it" + link: /how/ + new_window: false + highlight: false +- name: "Developer" + link: /developer/ + new_window: false + highlight: false +- name: "Export Data" + link: /export/ + new_window: false + highlight: false diff --git a/docs/_export/BSTATS.md b/docs/_export/BSTATS.md new file mode 100644 index 000000000..a25544eb4 --- /dev/null +++ b/docs/_export/BSTATS.md @@ -0,0 +1,45 @@ +--- +title: BSTATS +description: List of fields exported together with basic flow fields on the interface by BSTATS plugin. The plugin is compiled to export the first BSTATS_MAXELENCOUNT (15 by default) burst in each direction. The bursts are computed separately for each direction. Burst is defined by MINIMAL_PACKETS_IN_BURST (3 by default) and by MAXIMAL_INTERPKT_TIME (1000 ms by default) between packets to be included in a burst. When the flow contains less than MINIMAL_PACKETS_IN_BURST packets, the fields are not exported to reduce output bandwidth. 
+fields: + - + name: "SBI_BRST_PACKETS" + type: "uint32*" + ipfix: "0/291" + value: " SRC->DST: Number of packets transmitted in ith burst" + - + name: "SBI_BRST_BYTES" + type: "uint32*" + ipfix: "0/291" + value: " SRC->DST: Number of bytes transmitted in ith burst" + - + name: "SBI_BRST_TIME_START" + type: "time*" + ipfix: "0/291" + value: " SRC->DST: Start time of the ith burst" + - + name: "SBI_BRST_TIME_STOP" + type: "time*" + ipfix: "0/291" + value: " SRC->DST: End time of the ith burst" + - + name: "DBI_BRST_PACKETS" + type: "uint32*" + ipfix: "0/291" + value: " DST->SRC: Number of packets transmitted in ith burst" + - + name: "DBI_BRST_BYTES" + type: "uint32*" + ipfix: "0/291" + value: " DST->SRC: Number of bytes transmitted in ith burst" + - + name: "DBI_BRST_TIME_START" + type: "time*" + ipfix: "0/291" + value: " DST->SRC: Start time of the ith burst" + - + name: "DBI_BRST_TIME_STOP" + type: "time*" + ipfix: "0/291" + value: " DST->SRC: End time of the ith burst" +--- \ No newline at end of file diff --git a/docs/_export/DNS-SD.md b/docs/_export/DNS-SD.md new file mode 100644 index 000000000..8e3a00e96 --- /dev/null +++ b/docs/_export/DNS-SD.md @@ -0,0 +1,15 @@ +--- +title: DNS-SD +description: List of unirec fields exported together with basic flow fields on interface by DNS-SD plugin. +fields: + - + name: "DNSSD_QUERIES" + type: "string" + ipfix: "8057/826" + value: " list of queries for services" + - + name: "DNSSD_RESPONSES" + type: "string" + ipfix: "8057/827" + value: " list of advertised services" +--- \ No newline at end of file diff --git a/docs/_export/DNS.md b/docs/_export/DNS.md new file mode 100644 index 000000000..d9ce1c52d --- /dev/null +++ b/docs/_export/DNS.md @@ -0,0 +1,60 @@ +--- +title: DNS +description: List of unirec fields exported together with basic flow fields on interface by DNS plugin. 
+fields: + - + name: "DNS_ID" + type: "uint16" + ipfix: "8057/10" + value: "transaction ID" + - + name: "DNS_ANSWERS" + type: "uint16" + ipfix: "8057/14" + value: "number of DNS answer records" + - + name: "DNS_RCODE" + type: "uint8" + ipfix: "8057/1" + value: "response code field" + - + name: "DNS_NAME" + type: "string" + ipfix: "8057/2" + value: "question domain name" + - + name: "DNS_QTYPE" + type: "uint16" + ipfix: "8057/3" + value: "question type field" + - + name: "DNS_CLASS" + type: "uint16" + ipfix: "8057/4" + value: "class field of DNS question" + - + name: "DNS_RR_TTL" + type: "uint32" + ipfix: "8057/5" + value: "resource record TTL field" + - + name: "DNS_RLENGTH" + type: "uint16" + ipfix: "8057/6" + value: "length of DNS_RDATA" + - + ipfix: "8057/7" + name: "DNS_RDATA" + type: "bytes" + value: "resource record specific data" + - + name: "DNS_PSIZE" + type: "uint16" + ipfix: "8057/8" + value: "requestor's payload size" + - + name: "DNS_DO" + type: "uint8" + ipfix: "8057/9" + value: "DNSSEC OK bit" +--- \ No newline at end of file diff --git a/docs/_export/Flow Hash.md b/docs/_export/Flow Hash.md new file mode 100644 index 000000000..99fa1b730 --- /dev/null +++ b/docs/_export/Flow Hash.md @@ -0,0 +1,10 @@ +--- +title: Flow Hash +description: List of fields exported together with basic flow fields on interface by flow_hash plugin. +fields: + - + name: "FLOW_ID" + type: "uint64" + ipfix: "0/148" + value: " Hash of the flow - unique flow id" +--- \ No newline at end of file diff --git a/docs/_export/HTTP.md b/docs/_export/HTTP.md new file mode 100644 index 000000000..7d6a57c73 --- /dev/null +++ b/docs/_export/HTTP.md @@ -0,0 +1,50 @@ +--- +title: HTTP +description: List of unirec fields exported together with basic flow fields on interface by HTTP plugin. 
+fields: + - + name: "HTTP_DOMAIN" + type: "string" + ipfix: "39499/1" + value: "HTTP request host" + - + name: "HTTP_URI" + type: "string" + ipfix: "39499/2" + value: "HTTP request url" + - + name: "HTTP_USERAGENT" + type: "string" + ipfix: "39499/20" + value: "HTTP request user agent" + - + name: "HTTP_REFERER" + type: "string" + ipfix: "39499/3" + value: "HTTP request referer" + - + name: "HTTP_STATUS" + type: "uint16" + ipfix: "39499/12" + value: "HTTP response code" + - + name: "HTTP_CONTENT_TYPE" + type: "string" + ipfix: "39499/10" + value: "HTTP response content type" + - + name: "HTTP_METHOD" + type: "string" + ipfix: "39499/200" + value: "HTTP request method" + - + name: "HTTP_SERVER" + type: "string" + ipfix: "39499/201" + value: "HTTP response server" + - + name: "HTTP_SET_COOKIE_NAMES" + type: "string" + ipfix: "39499/202" + value: "HTTP response all set-cookie names separated by a delimiter" +--- \ No newline at end of file diff --git a/docs/_export/ICMP.md b/docs/_export/ICMP.md new file mode 100644 index 000000000..25bbb093d --- /dev/null +++ b/docs/_export/ICMP.md @@ -0,0 +1,11 @@ +--- +title: ICMP +description: List of fields exported together with basic flow fields on interface by icmp plugin. +fields: + - + name: "L4_ICMP_TYPE_CODE" + type: "uint16" + ipfix: "0/32" + value: " ICMP type (MSB) and code (LSB)" + +--- \ No newline at end of file diff --git a/docs/_export/IDPContent.md b/docs/_export/IDPContent.md new file mode 100644 index 000000000..387a4e7ae --- /dev/null +++ b/docs/_export/IDPContent.md @@ -0,0 +1,15 @@ +--- +title: IDPContent +description: List of fields exported together with basic flow fields on the interface by IDPContent plugin. The plugin is compiled to export IDPCONTENT_SIZE (100 by default) bytes from the first data packet in SRC -> DST direction, and the first data packet in DST -> SRC direction. 
+fields: + - + name: "IDP_CONTENT" + type: "bytes" + ipfix: "8057/850" + value: " Content of first data packet from SRC -> DST" + - + name: "IDP_CONTENT_REV" + type: "bytes" + ipfix: "8057/851" + value: " Content of first data packet from DST -> SRC" +--- \ No newline at end of file diff --git a/docs/_export/MPLS.md b/docs/_export/MPLS.md new file mode 100644 index 000000000..3d3d318d4 --- /dev/null +++ b/docs/_export/MPLS.md @@ -0,0 +1,10 @@ +--- +title: MPLS +description: List of fields exported together with basic flow fields on interface by mpls plugin. +fields: + - + name: "MPLS_TOP_LABEL_STACK_SECTION" + type: "bytes" + ipfix: "0/70" + value: " MPLS label section (without TTL), always 3 bytes" +--- \ No newline at end of file diff --git a/docs/_export/MQTT.md b/docs/_export/MQTT.md new file mode 100644 index 000000000..7227e4926 --- /dev/null +++ b/docs/_export/MQTT.md @@ -0,0 +1,40 @@ +--- +title: MQTT +description: List of unirec fields exported together with basic flow fields on interface by MQTT plugin. 
+fields: + - + name: "MQTT_TYPE_CUMULATIVE" + type: "uint16" + ipfix: "8057/1033" + value: " types of packets and session present flag cumulative" + - + name: "MQTT_VERSION" + type: "uint8" + ipfix: "8057/1034" + value: " MQTT version" + - + name: "MQTT_CONNECTION_FLAGS" + type: "uint8" + ipfix: "8057/1035" + value: " last CONNECT packet flags" + - + name: "MQTT_KEEP_ALIVE" + type: "uint16" + ipfix: "8057/1036" + value: " last CONNECT keep alive" + - + name: "MQTT_CONNECTION_RETURN_CODE" + type: "uint8" + ipfix: "8057/1037" + value: " last CONNECT return code" + - + name: "MQTT_PUBLISH_FLAGS" + type: "uint8" + ipfix: "8057/1038" + value: " cumulative of PUBLISH packet flags" + - + name: "MQTT_TOPICS" + type: "string" + ipfix: "8057/1039" + value: " topics from PUBLISH packets headers" +--- \ No newline at end of file diff --git a/docs/_export/NTP.md b/docs/_export/NTP.md new file mode 100644 index 000000000..4dded8e41 --- /dev/null +++ b/docs/_export/NTP.md @@ -0,0 +1,70 @@ +--- +title: NTP +description: List of unirec fields exported together with basic flow fields on interface by NTP plugin. 
+fields: + - + name: "NTP_LEAP" + type: "uint8" + ipfix: "8057/18" + value: " NTP leap field" + - + name: "NTP_VERSION" + type: "uint8" + ipfix: "8057/19" + value: " NTP message version" + - + name: "NTP_MODE" + type: "uint8" + ipfix: "8057/20" + value: " NTP mode field" + - + name: "NTP_STRATUM" + type: "uint8" + ipfix: "8057/21" + value: " NTP stratum field" + - + name: "NTP_POLL" + type: "uint8" + ipfix: "8057/22" + value: " NTP poll interval" + - + name: "NTP_PRECISION" + type: "uint8" + ipfix: "8057/23" + value: " NTP precision field" + - + name: "NTP_DELAY" + type: "uint32" + ipfix: "8057/24" + value: " NTP root delay" + - + name: "NTP_DISPERSION" + type: "uint32" + ipfix: "8057/25" + value: " NTP root dispersion" + - + name: "NTP_REF_ID" + type: "string" + ipfix: "8057/26" + value: " NTP reference ID" + - + name: "NTP_REF" + type: "string" + ipfix: "8057/27" + value: " NTP reference timestamp" + - + name: "NTP_ORIG" + type: "string" + ipfix: "8057/28" + value: " NTP origin timestamp" + - + name: "NTP_RECV" + type: "string" + ipfix: "8057/29" + value: " NTP receive timestamp" + - + name: "NTP_SENT" + type: "string" + ipfix: "8057/30" + value: " NTP transmit timestamp" +--- \ No newline at end of file diff --git a/docs/_export/NetBIOS.md b/docs/_export/NetBIOS.md new file mode 100644 index 000000000..b719854a4 --- /dev/null +++ b/docs/_export/NetBIOS.md @@ -0,0 +1,15 @@ +--- +title: NetBIOS +description: List of fields exported together with basic flow fields on interface by NetBIOS plugin. 
+fields: + - + name: "NB_NAME" + type: "string" + ipfix: "8057/831" + value: " NetBIOS Name Service name" + - + name: "NB_SUFFIX" + type: "uint8" + ipfix: "8057/832" + value: " NetBIOS Name Service suffix" +--- \ No newline at end of file diff --git a/docs/_export/NetTiSA.md b/docs/_export/NetTiSA.md new file mode 100644 index 000000000..c61fb67ce --- /dev/null +++ b/docs/_export/NetTiSA.md @@ -0,0 +1,70 @@ +--- +title: NetTiSA +description: List of unirec fields exported together with NetTiSA flow fields on interface by nettisa plugin. +fields: + - + name: "NTS_MEAN" + type: "float" + ipfix: "8057/1020" + value: "The mean of the payload lengths of packets" + - + name: "NTS_MIN" + type: "uint16" + ipfix: "8057/1021" + value: "Minimal value from all packet payload lengths" + - + name: "NTS_MAX" + type: "uint16" + ipfix: "8057/1022" + value: "Maximum value from all packet payload lengths" + - + name: "NTS_STDEV" + type: "float" + ipfix: "8057/1023" + value: "Represents a switching ratio between different values of the sequence of observation." + - + name: "NTS_KURTOSIS" + type: "float" + ipfix: "8057/1024" + value: "The standard deviation is measure of the variation of data from the mean." + - + name: "NTS_ROOT_MEAN_SQUARE" + type: "float" + ipfix: "8057/1025" + value: "The measure of the magnitude of payload lengths of packets." + - + name: "NTS_AVERAGE_DISPERSION" + type: "float" + ipfix: "8057/1026" + value: "The average absolute difference between each payload length of packet and the mean value." + - + name: "NTS_MEAN_SCALED_TIME" + type: "float" + ipfix: "8057/1027" + value: "The kurtosis is the measure describing the extent to which the tails of a distribution differ from the tails of a normal distribution." + - + name: "NTS_MEAN_DIFFTIMES" + type: "float" + ipfix: "8057/1028" + value: "The scaled times is defined as sequence s(t) = t1 − t1 , t2 − t1 , … , tn − t1 . We compute the mean of the value with same method as for feature Mean." 
+ - + name: "NTS_MIN_DIFFTIMES" + type: "float" + ipfix: "8057/1029" + value: "The time differences is defined as sequence dt = tj - ti | j = i + 1, i in 1, 2, ... n - 1. We compute the mean of the value with same method as for feature Mean." + - + name: "NTS_MAX_DIFFTIMES" + type: "float" + ipfix: "8057/1030" + value: "Minimal value from all time differences, i.e., min space between packets." + - + name: "NTS_TIME_DISTRIBUTION" + type: "float" + ipfix: "8057/1031" + value: "Maximum value from all time differences, i.e., max space between packets." + - + name: "NTS_SWITCHING_RATIO" + type: "float" + ipfix: "8057/1032" + value: "Describes the distribution of time differences between individual packets." +--- \ No newline at end of file diff --git a/docs/_export/OSQUERY.md b/docs/_export/OSQUERY.md new file mode 100644 index 000000000..98df7e19d --- /dev/null +++ b/docs/_export/OSQUERY.md @@ -0,0 +1,60 @@ +--- +title: OSQUERY +description: List of unirec fields exported together with basic flow fields on interface by OSQUERY plugin. 
+fields: + - + name: "PROGRAM_NAME" + type: "string" + ipfix: "8057/852" + value: " The name of the program that handles the connection" + - + name: "USERNAME" + type: "string" + ipfix: "8057/853" + value: " The name of the user who starts the process" + - + name: "OS_NAME" + type: "string" + ipfix: "8057/854" + value: " Distribution or product name" + - + name: "OS_MAJOR" + type: "uint16" + ipfix: "8057/855" + value: " Major release version" + - + name: "OS_MINOR" + type: "uint16" + ipfix: "8057/856" + value: " Minor release version" + - + name: "OS_BUILD" + type: "string" + ipfix: "8057/857" + value: " Optional build-specific or variant string" + - + name: "OS_PLATFORM" + type: "string" + ipfix: "8057/858" + value: " OS Platform or ID" + - + name: "OS_PLATFORM_LIKE" + type: "string" + ipfix: "8057/859" + value: " Closely related platforms" + - + name: "OS_ARCH" + type: "string" + ipfix: "8057/860" + value: " OS Architecture" + - + name: "KERNEL_VERSION" + type: "string" + ipfix: "8057/861" + value: " Kernel version" + - + name: "SYSTEM_HOSTNAME" + type: "string" + ipfix: "8057/862" + value: " Network hostname including domain" +--- \ No newline at end of file diff --git a/docs/_export/OVPN.md b/docs/_export/OVPN.md new file mode 100644 index 000000000..17901ea90 --- /dev/null +++ b/docs/_export/OVPN.md @@ -0,0 +1,11 @@ +--- +title: OVPN +description: List of fields exported together with basic flow fields on interface by OVPN plugin. +fields: + - + name: "OVPN_CONF_LEVEL" + type: "uint8" + ipfix: "8057/828" + value: " level of confidence that the flow record is an OpenVPN tunnel" + +--- \ No newline at end of file diff --git a/docs/_export/PHISTS.md b/docs/_export/PHISTS.md new file mode 100644 index 000000000..987294628 --- /dev/null +++ b/docs/_export/PHISTS.md @@ -0,0 +1,26 @@ +--- +title: PHISTS +description: List of fields exported together with basic flow fields on the interface by PHISTS plugin. 
The plugin exports the histograms of Payload sizes and Inter-Packet-Times for each direction. The histogram bins are scaled logarithmically and are shown in the following table. +fields: + - + name: "D_PHISTS_IPT" + type: "uint32*" + ipfix: "0/291" + value: " DST->SRC: Histogram of interpacket times" + - + name: "D_PHISTS_SIZES" + type: "uint32*" + ipfix: "0/291" + value: " DST->SRC: Histogram of packet sizes" + - + name: "S_PHISTS_IPT" + type: "uint32*" + ipfix: "0/291" + value: " SRC->DST: Histogram of interpacket times" + - + name: "S_PHISTS_SIZES" + type: "uint32*" + ipfix: "0/291" + value: " SRC->DST: Histogram of packet sizes" + +--- \ No newline at end of file diff --git a/docs/_export/PSTATS.md b/docs/_export/PSTATS.md new file mode 100644 index 000000000..9715d3bdc --- /dev/null +++ b/docs/_export/PSTATS.md @@ -0,0 +1,25 @@ +--- +title: PSTATS +description: "List of unirec fields exported on interface by PSTATS plugin. The plugin is compiled to gather statistics for the first PSTATS_MAXELEMCOUNT (30 by default) packets in the biflow record. Note: the following fields are UniRec arrays (or basicList in IPFIX)." +fields: + - + name: "PPI_PKT_LENGTHS" + type: "uint16*" + ipfix: "0/291" + value: " sizes of the first packets" + - + name: "PPI_PKT_TIMES" + type: "time*" + ipfix: "0/291" + value: " timestamps of the first packets" + - + name: "PPI_PKT_DIRECTIONS" + type: "int8*" + ipfix: "0/291" + value: " directions of the first packets" + - + name: "PPI_PKT_FLAGS" + type: "uint8*" + ipfix: "0/291" + value: " TCP flags for each packet" +--- \ No newline at end of file diff --git a/docs/_export/PassiveDNS.md b/docs/_export/PassiveDNS.md new file mode 100644 index 000000000..0a401d978 --- /dev/null +++ b/docs/_export/PassiveDNS.md @@ -0,0 +1,26 @@ +--- +title: PassiveDNS +description: List of unirec fields exported together with basic flow fields on interface by PassiveDNS plugin.
+fields: + - + name: "DNS_ID" + type: "uint16" + ipfix: "8057/10" + value: " transaction ID" + - + name: "DNS_ATYPE" + type: "uint8" + ipfix: "8057/11" + value: " response record type" + - + name: "DNS_NAME" + type: "string" + ipfix: "8057/2" + value: " question domain name" + - + name: "DNS_RR_TTL" + type: "uint32" + ipfix: "8057/5" + value: " resource record TTL field" + +--- \ No newline at end of file diff --git a/docs/_export/QUIC.md b/docs/_export/QUIC.md new file mode 100644 index 000000000..b2f2395a8 --- /dev/null +++ b/docs/_export/QUIC.md @@ -0,0 +1,90 @@ +--- +title: QUIC +description: List of fields exported together with basic flow fields on interface by quic plugin. -with-quic-ch-full-tls-ext enables extraction of all TLS extensions in the Client Hello. +fields: + - + name: "QUIC_SNI" + type: "string" + ipfix: "8057/890" + value: " Decrypted server name" + - + name: "QUIC_USER_AGENT" + type: "string" + ipfix: "8057/891" + value: " Decrypted user agent" + - + name: "QUIC_VERSION" + type: "uint32" + ipfix: "8057/892" + value: " QUIC version from first server long header packets" + - + name: "QUIC_CLIENT_VERSION" + type: "uint32" + ipfix: "8057/893" + value: " QUIC version from first client long header packet" + - + name: "QUIC_TOKEN_LENGTH" + type: "uint64" + ipfix: "8057/894" + value: " Token length from Initial and Retry packets" + - + name: "QUIC_OCCID" + type: "bytes" + ipfix: "8057/895" + value: " Source Connection ID from first client packet" + - + name: "QUIC_OSCID" + type: "bytes" + ipfix: "8057/896" + value: " Destination Connection ID from first client packet" + - + name: "QUIC_SCID" + type: "bytes" + ipfix: "8057/897" + value: " Source Connection ID from first server packet" + - + name: "QUIC_RETRY_SCID" + type: "bytes" + ipfix: "8057/898" + value: " Source Connection ID from Retry packet" + - + name: "QUIC_MULTIPLEXED" + type: "uint8" + ipfix: "8057/899" + value: " > 0 if multiplexed (at least two different QUIC_OSCIDs or SNIs)" + - + name: 
"QUIC_ZERO_RTT" + type: "uint8" + ipfix: "8057/889" + value: " Number of 0-RTT packets in flow." + - + name: "QUIC_SERVER_PORT" + type: "uint16" + ipfix: "8057/887" + value: " TODO Server Port determined by packet type and TLS message" + - + name: "QUIC_PACKETS" + type: "uint8*" + ipfix: "0/291" + value: " QUIC long header packet type (v1 encoded), version negotiation, QUIC bit" + - + name: "QUIC_CH_PARSED" + type: "uint8" + ipfix: "8057/886" + value: " >0 if TLS Client Hello parsed without errors" + - + name: "QUIC_TLS_EXT_TYPE" + type: "uint16*" + ipfix: "0/291" + value: " TLS extensions in the TLS Client Hello" + - + name: "QUIC_TLS_EXT_LEN" + type: "uint16*" + ipfix: "0/291" + value: " Length of each TLS extension" + - + name: "QUIC_TLS_EXT" + type: "string" + ipfix: "8057/883" + value: " Payload of all/application_layer_protocol_negotiation and quic_transport params TLS extension" +--- \ No newline at end of file diff --git a/docs/_export/RTSP.md b/docs/_export/RTSP.md new file mode 100644 index 000000000..1f7aa25cd --- /dev/null +++ b/docs/_export/RTSP.md @@ -0,0 +1,35 @@ +--- +title: RTSP +description: List of unirec fields exported together with basic flow fields on interface by RTSP plugin. 
+fields: + - + name: "RTSP_REQUEST_METHOD" + type: "string" + ipfix: "16982/600" + value: "RTSP request method name" + - + name: "RTSP_REQUEST_AGENT" + type: "string" + ipfix: "16982/601" + value: "RTSP request user agent" + - + name: "RTSP_REQUEST_URI" + type: "string" + ipfix: "16982/602" + value: "RTSP request URI" + - + name: "RTSP_RESPONSE_STATUS_CODE" + type: "uint16" + ipfix: "16982/603" + value: "RTSP response status code" + - + name: "RTSP_RESPONSE_SERVER" + type: "string" + ipfix: "16982/605" + value: "RTSP response server field" + - + name: "RTSP_RESPONSE_CONTENT_TYPE" + type: "string" + ipfix: "16982/604" + value: "RTSP response content type" +--- \ No newline at end of file diff --git a/docs/_export/SIP.md b/docs/_export/SIP.md new file mode 100644 index 000000000..c6466b040 --- /dev/null +++ b/docs/_export/SIP.md @@ -0,0 +1,50 @@ +--- +title: SIP +description: List of unirec fields exported together with basic flow fields on interface by SIP plugin. +fields: + - + name: "SIP_MSG_TYPE" + type: "uint16" + ipfix: "8057/100" + value: " SIP message code" + - + name: "SIP_STATUS_CODE" + type: "uint16" + ipfix: "8057/101" + value: " status of the SIP request" + - + name: "SIP_CSEQ" + type: "string" + ipfix: "8057/108" + value: " CSeq field of SIP packet" + - + name: "SIP_CALLING_PARTY" + type: "string" + ipfix: "8057/103" + value: " calling party (from) URI" + - + name: "SIP_CALLED_PARTY" + type: "string" + ipfix: "8057/104" + value: " called party (to) URI" + - + name: "SIP_CALL_ID" + type: "string" + ipfix: "8057/102" + value: " call ID" + - + name: "SIP_USER_AGENT" + type: "string" + ipfix: "8057/106" + value: " user agent field of SIP packet" + - + name: "SIP_REQUEST_URI" + type: "string" + ipfix: "8057/107" + value: " SIP request URI" + - + name: "SIP_VIA" + type: "string" + ipfix: "8057/105" + value: " via field of SIP packet" +--- \ No newline at end of file diff --git a/docs/_export/SMTP.md b/docs/_export/SMTP.md new file mode 100644 index 
000000000..e5f5526c3 --- /dev/null +++ b/docs/_export/SMTP.md @@ -0,0 +1,60 @@ +--- +title: SMTP +description: List of unirec fields exported on interface by SMTP plugin. +fields: + - + name: "SMTP_2XX_STAT_CODE_COUNT" + type: "uint32" + ipfix: "8057/816" + value: " number of 2XX status codes" + - + name: "SMTP_3XX_STAT_CODE_COUNT" + type: "uint32" + ipfix: "8057/817" + value: " number of 3XX status codes" + - + name: "SMTP_4XX_STAT_CODE_COUNT" + type: "uint32" + ipfix: "8057/818" + value: " number of 4XX status codes" + - + name: "SMTP_5XX_STAT_CODE_COUNT" + type: "uint32" + ipfix: "8057/819" + value: " number of 5XX status codes" + - + name: "SMTP_COMMAND_FLAGS" + type: "uint32" + ipfix: "8057/810" + value: " bit array of commands present" + - + name: "SMTP_MAIL_CMD_COUNT" + type: "uint32" + ipfix: "8057/811" + value: " number of MAIL commands" + - + name: "SMTP_RCPT_CMD_COUNT" + type: "uint32" + ipfix: "8057/812" + value: " number of RCPT commands" + - + name: "SMTP_STAT_CODE_FLAGS" + type: "uint32" + ipfix: "8057/815" + value: " bit array of status codes present" + - + name: "SMTP_DOMAIN" + type: "string" + ipfix: "8057/820" + value: " domain name of the SMTP client" + - + name: "SMTP_FIRST_SENDER" + type: "string" + ipfix: "8057/813" + value: " first sender in MAIL command" + - + name: "SMTP_FIRST_RECIPIENT" + type: "string" + ipfix: "8057/814" + value: " first recipient in RCPT command" +--- \ No newline at end of file diff --git a/docs/_export/SSADetector.md b/docs/_export/SSADetector.md new file mode 100644 index 000000000..561bc0b54 --- /dev/null +++ b/docs/_export/SSADetector.md @@ -0,0 +1,10 @@ +--- +title: SSADetector +description: List of fields exported together with basic flow fields on interface by ssadetector plugin. The detector search for the SYN SYN-ACK ACK pattern in packet lengths. Multiple occurrences of this pattern suggest a tunneled connection. 
+fields: + - + name: "SSA_CONF_LEVEL" + type: "uint8" + ipfix: "8057/903" + value: " 1 if SSA sequence detected, 0 otherwise" +--- \ No newline at end of file diff --git a/docs/_export/SSDP.md b/docs/_export/SSDP.md new file mode 100644 index 000000000..b2ca6bfd0 --- /dev/null +++ b/docs/_export/SSDP.md @@ -0,0 +1,30 @@ +--- +title: SSDP +description: List of unirec fields exported together with basic flow fields on interface by SSDP plugin. +fields: + - + name: "SSDP_LOCATION_PORT" + type: "uint16" + ipfix: "8057/821" + value: " service port" + - + name: "SSDP_NT" + type: "string" + ipfix: "8057/824" + value: " list of advertised service urns" + - + name: "SSDP_SERVER" + type: "string" + ipfix: "8057/822" + value: " server info" + - + name: "SSDP_ST" + type: "string" + ipfix: "8057/825" + value: " list of queried service urns" + - + name: "SSDP_USER_AGENT" + type: "string" + ipfix: "8057/823" + value: " list of user agents" +--- \ No newline at end of file diff --git a/docs/_export/TLS.md b/docs/_export/TLS.md new file mode 100644 index 000000000..7c51c81ee --- /dev/null +++ b/docs/_export/TLS.md @@ -0,0 +1,35 @@ +--- +title: TLS +description: List of unirec fields exported together with basic flow fields on interface by TLS plugin. 
+fields: + - + name: "TLS_SNI" + type: "string" + ipfix: "8057/808" + value: "TLS server name indication field from client" + - + name: "TLS_ALPN" + type: "string" + ipfix: "39499/337" + value: "TLS application protocol layer negotiation field from server" + - + name: "TLS_VERSION" + type: "uint16" + ipfix: "39499/333" + value: "TLS client protocol version" + - + name: "TLS_JA3" + type: "string" + ipfix: "39499/357" + value: "TLS client JA3 fingerprint" + - + name: "TLS_EXT_TYPE" + type: "uint16" + ipfix: "0/291" + value: "TLS extensions in the TLS Client Hello" + - + name: "TLS_EXT_LEN" + type: "uint16" + ipfix: "0/291" + value: "Length of each TLS extension" +--- \ No newline at end of file diff --git a/docs/_export/VLAN.md b/docs/_export/VLAN.md new file mode 100644 index 000000000..9b7777c94 --- /dev/null +++ b/docs/_export/VLAN.md @@ -0,0 +1,11 @@ +--- +title: VLAN +description: List of fields exported together with basic flow fields on the interface by VLAN plugin. +fields: + - + name: "VLAN_ID" + type: "uint16" + ipfix: "0/58" + value: " Vlan ID (used in flow key)" + +--- \ No newline at end of file diff --git a/docs/_export/WG.md b/docs/_export/WG.md new file mode 100644 index 000000000..75658e31c --- /dev/null +++ b/docs/_export/WG.md @@ -0,0 +1,21 @@ +--- +title: WG +description: List of fields exported together with basic flow fields on interface by WG plugin. 
+fields: + - + name: "WG_CONF_LEVEL" + type: "uint8" + ipfix: "8057/1100" + value: " level of confidence that the flow record is a WireGuard tunnel" + - + name: "WG_SRC_PEER" + type: "uint32" + ipfix: "8057/1101" + value: " ephemeral SRC peer identifier" + - + name: "WG_DST_PEER" + type: "uint32" + ipfix: "8057/1102" + value: " ephemeral DST peer identifier" + +--- \ No newline at end of file diff --git a/docs/_export/basic.md b/docs/_export/basic.md new file mode 100644 index 000000000..3c2a7d1c0 --- /dev/null +++ b/docs/_export/basic.md @@ -0,0 +1,90 @@ +--- +title: Basic +description: Basic unirec fields exported on interface with basic (pseudo) plugin. These fields are also exported on interfaces where HTTP, DNS, SIP and NTP plugins are active. +fields: + - + name: "DST_MAC" + type: "macaddr" + ipfix: "0/80" + value: "destination MAC address" + - + name: "SRC_MAC" + type: "macaddr" + ipfix: "0/56" + value: "source MAC address" + - + name: "DST_IP" + type: "ipaddr" + ipfix: "0/12 or 0/28" + value: "destination IP address" + - + name: "SRC_IP" + type: "ipaddr" + ipfix: "0/8 or 0/27" + value: "source IP address" + - + name: "BYTES" + type: "uint64" + ipfix: "0/1" + value: "number of bytes in data flow (src to dst)" + - + name: "BYTES_REV" + type: "uint64" + ipfix: "29305/1" + value: "number of bytes in data flow (dst to src)" + - + name: "LINK_BIT_FIELD or ODID" + type: "uint64 or uint32" + ipfix: "-" + value: "exporter identification" + - + name: "TIME_FIRST" + type: "time" + ipfix: "0/152" + value: "first time stamp" + - + name: "TIME_LAST" + type: "time" + ipfix: "0/153" + value: "last time stamp" + - + name: "PACKETS" + type: "uint32" + ipfix: "0/2" + value: "number of packets in data flow (src to dst)" + - + name: "PACKETS_REV" + type: "uint32" + ipfix: "29305/2" + value: "number of packets in data flow (dst to src)" + - + name: "DST_PORT" + type: "uint16" + ipfix: "0/11" + value: "transport layer destination port" + - + name: "SRC_PORT" + type: "uint16" + 
ipfix: "0/7" + value: "transport layer source port" + - + name: "DIR_BIT_FIELD" + type: "uint8" + ipfix: "0/10" + value: "bit field for determining outgoing/incoming traffic" + - + name: "PROTOCOL" + type: "uint8" + ipfix: "0/60" + value: "transport protocol" + - + name: "TCP_FLAGS" + type: "uint8" + ipfix: "0/6" + value: "TCP protocol flags (src to dst)" + - + name: "TCP_FLAGS_REV" + type: "uint8" + ipfix: "29305/6" + value: "TCP protocol flags (dst to src)" +--- \ No newline at end of file diff --git a/docs/_export/basic_plus.md b/docs/_export/basic_plus.md new file mode 100644 index 000000000..98f75a334 --- /dev/null +++ b/docs/_export/basic_plus.md @@ -0,0 +1,60 @@ +--- +title: Basic plus +description: List of unirec fields exported together with basic flow fields on interface by basicplus plugin. Fields without _REV suffix are fields from source flow. Fields with _REV are from the opposite direction. +fields: + - + name: "IP_TTL" + type: "uint8" + ipfix: "0/192" + value: "IP TTL field" + - + name: "IP_TTL_REV" + type: "uint8" + ipfix: "29305/192" + value: "IP TTL field" + - + name: "IP_FLG" + type: "uint8" + ipfix: "0/197" + value: "IP FLAGS" + - + name: "IP_FLG_REV" + type: "uint8" + ipfix: "29305/197" + value: "IP FLAGS" + - + name: "TCP_WIN" + type: "uint16" + ipfix: "0/186" + value: "TCP window size" + - + name: "TCP_WIN_REV" + type: "uint16" + ipfix: "29305/186" + value: "TCP window size" + - + name: "TCP_OPT" + type: "uint64" + ipfix: "0/209" + value: "TCP options bitfield" + - + name: "TCP_OPT_REV" + type: "uint64" + ipfix: "29305/209" + value: "TCP options bitfield" + - + name: "TCP_MSS" + type: "uint32" + ipfix: "8057/900" + value: "TCP maximum segment size" + - + name: "TCP_MSS_REV" + type: "uint32" + ipfix: "8057/901" + value: "TCP maximum segment size" + - + name: "TCP_SYN_SIZE" + type: "uint16" + ipfix: "8057/902" + value: "TCP SYN packet size" +--- \ No newline at end of file diff --git a/docs/_get_options/_defaults.md 
b/docs/_get_options/_defaults.md new file mode 100644 index 000000000..9e20ae372 --- /dev/null +++ b/docs/_get_options/_defaults.md @@ -0,0 +1,5 @@ +--- +title: +description: +code: +--- \ No newline at end of file diff --git a/docs/_get_options/a_rhel_packages.md b/docs/_get_options/a_rhel_packages.md new file mode 100644 index 000000000..c67cf60be --- /dev/null +++ b/docs/_get_options/a_rhel_packages.md @@ -0,0 +1,17 @@ +--- +title: Installation from binary packages (RPM) (recommended) +description: We use COPR infrastructure to build and serve ipfixprobe packages. Currently, we support packages for RPM-based distributions, such as OracleLinux, RockyLinux, ... EPEL version 8 or 9. + +instructions: + - + description: "Install copr repository." + code: + - "dnf install -y dnf-plugins-core && dnf copr -y enable @CESNET/NEMEA-stable" + + - + description: "After successful installation of COPR, you can install the ipfixprobe via yum or dnf." + code: + - "dnf install ipfixprobe" + + +--- diff --git a/docs/_get_options/build_from_source.md b/docs/_get_options/build_from_source.md new file mode 100644 index 000000000..10ff2ae47 --- /dev/null +++ b/docs/_get_options/build_from_source.md @@ -0,0 +1,33 @@ +--- +title: Build from source codes +description: You can build ipfixprobe from source codes available at GitHub. + +instructions: + - + description: "Install requirements" + code: + - "dnf -y install wget curl net-tools gcc gcc-c++ git libtool libpcap-devel libunwind libssl-devel libpcap-devel" + - + description: "Now get the ipfixprobe source codes" + code: + - "git clone https://github.com/CESNET/ipfixprobe.git" + - cd ipfixprobe + - autoreconf -i + - + description: "Ipfixprobe uses autotools to set up the build process. We encourage you to explore ./configure.sh -h to see all the available options. Nevertheless, for standard (max 1Gbps) network monitoring without any specialized tools, you should use the following configuration."
+ code: + - "./configure.sh --with-pcap --with-quic --with-unwind" + - + description: "Then just make the ipfixprobe and install it. You might need root privileges for installation." + code: + - "make -j 2" + - "sudo make install" + + - + description: "Optional NEMEA plugin. Ipfixprobe can export data directly to NEMEA framework. If you want to use this feature, you need to install NEMEA dependencies and enable this feature in autotools script." + code: + - "dnf install libtrap-devel unirec-devel" + - "./configure.sh --with-pcap --with-quic --with-unwind --with-nemea" + - "make -j 2" + - sudo make install +--- diff --git a/docs/_get_options/openwrt.md b/docs/_get_options/openwrt.md new file mode 100644 index 000000000..66b92bf33 --- /dev/null +++ b/docs/_get_options/openwrt.md @@ -0,0 +1,23 @@ +--- +title: Installation on Turris (OpenWrt routers) +description: CESNET feed is officially supported by CZ.NIC, so installation on Turris devices is easy! Contrary for other OpenWrt devices, it is most likely necessary to compile a package; see our NEMEA-OpenWrt feed for more details or contact us. Installation on Turris can be done via SSH, which is described bellow, or using LUCI intuitive interface. + +instructions: + - + description: "Update repository metadata" + code: + - opkg update + + - + description: "Install ipfixprobe" + code: + - opkg install ipfixprobe + + - + description: "Optionally for LUCI configuration page, install luci-app-ipfixprobe" + code: + - opkg install luci-app-ipfixprobe + + +--- + diff --git a/docs/_how/Input plugin.md b/docs/_how/Input plugin.md new file mode 100644 index 000000000..9a68b2dc0 --- /dev/null +++ b/docs/_how/Input plugin.md @@ -0,0 +1,132 @@ +--- +title: Input plugin +description: Input plugin defines source of incoming packets. Use -i to specify input plugin. 
+ +options: + - + title: "Pcap reader" + description: "Input plugin for reading packets from a pcap file or a network interface" + parameters: + - + name: "f or file" + description: "Defines path to pcap file." + - + name: "i or ifc" + description: "Defines interface name." + - + name: "F or filter" + description: "Defines filter string." + - + name: "s or snaplen" + description: "Defines snapshot length in bytes (live capture only)." + - + name: "l or list" + description: "Print list of available interfaces." + runs: + - + explanation: "Read the pcap file specified by PATH value. Possible PATH value 'pcaps/bstats.pcap' " + code: "./ipfixprobe -i 'pcap;file=PATH;' -s 'cache'" + - + explanation: "Read packets from interface specified by IFC value. Possible IFC value 'eth0'" + code: "./ipfixprobe -i 'pcap;i=IFC;' -s 'cache'" + - + title: "DPDK" + description: "Input plugin for reading packets using DPDK interface" + parameters: + - + name: "b or bsize" + description: "Size of the MBUF packet buffer. Default: 64." + - + name: "p or port" + description: "DPDK port to be used as an input interface." + - + name: "m or mem" + description: "Size of the memory pool for received packets. Default: 16384." + - + name: "q or queue" + description: "Number of RX queues. Default: 1." + - + name: "e or eal" + description: "DPDK eal." + - + name: "M or mtu" + description: "Input interface MTU. Default: 1518." + runs: + - + explanation: "Read packets using DPDK input interface and 1 DPDK queue, enable plugins for basic statistics, http and tls, output to IPFIX on a local machine + DPDK EAL parameters are passed in `e, eal` parameters + DPDK plugin configuration has to be specified in the first input interface. + The following `dpdk` interfaces are given without parameters; their configuration is inherited from the first one. 
+ Example for the queue of 3 DPDK input plugins (q=3): " + code: "./ipfixprobe -i 'dpdk;p=0;q=3;e=-c 0x1 -a <[domain:]bus:devid.func>' -i dpdk -i dpdk -p http -p bstats -p tls -o 'ipfix;h=127.0.0.1'" + - + explanation: "Same example for the multiport read from ports 0 and 1, note comma separated ports:" + code: "./ipfixprobe -i 'dpdk;p=0,1;q=3;e=-c 0x1 -a <[domain:]bus:devid.func>' -i dpdk -i dpdk -p http -p bstats -p tls -o 'ipfix;h=127.0.0.1'" + - + title: "DPDK-ring" + description: "DPDK ring input interface for ipfixprobe (secondary DPDK app)." + parameters: + - + name: "b or bsize" + description: "Size of the MBUF packet buffer. Default: 64." + - + name: "r or ring" + description: "Name of the ring to read packets from. Need to be specified explicitly thus no default provided." + - + name: "e or eal" + description: "DPDK eal." + runs: + - + explanation: "Read packets using DPDK input interface as secondary process with shared memory (DPDK rings) - in this case, 4 DPDK rings are used" + code: "./ipfixprobe -i 'dpdk-ring;r=rx_ipfixprobe_0;e= --proc-type=secondary' -i 'dpdk-ring;r=rx_ipfixprobe_1' -i 'dpdk-ring;r=rx_ipfixprobe_2' -i 'dpdk-ring;r=rx_ipfixprobe_3' -o 'text'" + - + title: "Raw" + description: "Input plugin for reading packets from raw interface" + parameters: + - + name: "i or ifc" + description: "Defines network interface name." + - + name: "b or blocks" + description: "Defines number of packet blocks." + - + name: "f or fanout" + description: "Enables packet fanout." + - + name: "p or pkts" + description: "Defines number of packets in block." + - + name: "l or list" + description: "Print list of available interfaces." + runs: + - + explanation: "Read packets from interface specified by IFC value. Possible IFC value 'eth0'" + code: "./ipfixprobe -i 'raw;ifc=IFC;' -s 'cache'" + - + title: "Benchmark" + description: "Input plugin for various benchmarking purposes." 
+ parameters: + - + name: "m or mode" + description: "Defines benchmark mode: 1f (1x N-packet flow) or nf (Nx 1-packet flow)." + - + name: "S or seed" + description: "Defines string seed for random generator." + - + name: "d or duration" + description: "Defines duration in seconds." + - + name: "p or count" + description: "Defines packet count." + - + name: "s or size" + description: "Defines packet size." + - + name: "I or id" + description: "Defines link identifier number." + runs: + - + explanation: "Read packets from interface specified with DPDK ports 0 and 1" + code: "`./ipfixprobe -i 'dpdk;p=0,1;' -s 'cache'" + +--- \ No newline at end of file diff --git a/docs/_how/output plugin.md b/docs/_how/output plugin.md new file mode 100644 index 000000000..349c290c4 --- /dev/null +++ b/docs/_how/output plugin.md @@ -0,0 +1,88 @@ +--- +title: Output plugin +description: Output plugin defines how flows are expoted. Use -o to specify output plugin. + +options: +- + title: "Text" + description: "Provides human readable output to the terminal or file." + parameters: + - + name: "f or file" + description: "Defines path to savefile to write output in instead of stdout." + - + name: "m or mac" + description: "Boolean flag. Mac addresses are hidden if set." + + runs: + - + explanation: "Print expoted flows to the terminal without mac adresses " + code: "./ipfixprobe -o 'text;mac'-i 'pcap;file=...;' -s 'cache'" + - + explanation: "Print expoted flows to the FILE" + code: "./ipfixprobe -o 'text;f=FILE'-i 'pcap;file=...;' -s 'cache'" +- + title: "IPFIX" + description: "Exports data in the IPFIX format" + parameters: + - + name: "h or host" + description: "Defines ip address of remote collector." + - + name: "p or port " + description: "Defines collector port to send data to." + - + name: "m or mtu" + description: "Defines maximum size of ipfix packet payload sent." + - + name: "u or udp" + description: "Boolean flag. UDP is used if set." 
+ - + name: "n or non-blocking-tcp" + description: "Boolean flag. Non-blocking-tcp socket is used if set." + - + name: "I or id" + description: "Defines exporter id." + - + name: "t or template" + description: "Defines template refresh rate in seconds." + runs: + - + explanation: "Send exported data to the localhost using UDP as an exporter 3." + code: "./ipfixprobe -o 'ipfix;h=127.0.0.1,u,I=3'-i 'pcap;file=...;' -s 'cache'" + - + explanation: "Send exported data to the localhost:4739 using non-blocking tcp as an exporter 3 with maximal transfer unit set to 2000." + code: "./ipfixprobe -o 'ipfix;h=127.0.0.1,p=4739,n,mtu=2000'-i 'pcap;file=...;' -s 'cache'" +- + title: "UNIREC" + description: "Exports data in the UNIREC format" + parameters: + - + name: "i or ifc" + description: "Defines unirec interface to use." + - + name: "p or plugins" + description: "Defines plugin-interface mapping. Plugins can be grouped like '(p1,p2,p3),p4,(p5,p6)." + - + name: "o or odid" + description: "Boolean flag.If set exports ODID field." + - + name: "e or eof" + description: "Boolean flag.If set sends eof messag on exit." + - + name: "I or id" + description: "Defines exporter id." + - + name: "h or help" + description: "Prints libtrap help." 
+ runs: + - + explanation: "Send exported data to the Unix socket 'ipfixprobe'" + code: "./ipfixprobe -o 'unirec;i=u:ipfixprobe'-i 'pcap;file=...;' -s 'cache'" + - + explanation: "Same as previous, but should be used with small pcap files to avoid not sending data" + code: "./ipfixprobe -o 'unirec;i=u:ipfixprobe:timeout=WAIT:buffer=off'-i 'pcap;file=...;' -s 'cache'" + - + explanation: "Save exported data to the data.trapcap" + code: "./ipfixprobe -o 'unirec;i=f:data.trapcap'-i 'pcap;file=...;' -s 'cache'" +--- \ No newline at end of file diff --git a/docs/_how/storage plugin.md b/docs/_how/storage plugin.md new file mode 100644 index 000000000..1d182936b --- /dev/null +++ b/docs/_how/storage plugin.md @@ -0,0 +1,32 @@ +--- +title: Storage plugin +description: Storage plugin defines how flows are internally stored. Use -s to specify storage plugin. + +options: +- + title: "Cache" + description: "Currently only available plugin. Hash table is used to keep flows. Hash table is divided into rows. Each row is managed as LRU. " + parameters: + - + name: "s or size" + description: "Defines count of flows that are kept in the cache at once. Cache size is 2s." + - + name: "l or line" + description: "Defines length of the cache line. Line length is 2l." + - + name: "a or active" + description: "Defines active timeout. When there is a flow, that is active for more than -a seconds, its exported." + - + name: "i or inactive" + description: "Defines inactive timeout. When there is a flow, that is inactive for more than -i seconds, its exported." + - + name: "S or split " + description: "Boolean flag. Defines if the bidirectional flow between two nodes is splitted into 2 separate unidirectional flows." + - + name: "fe/frag-enable, fs/frag-size, ft/frag-timeout" + description: "Used to enable completing fragmented packets into one packet. Framentation cache size is fs and timeout to consider fragments belong to same packet is ft." 
+ runs: + - + explanation: "Store flows using 'cache' " + code: "./ipfixprobe -s 'cache' -i 'pcap;file=PATH;'" +--- \ No newline at end of file diff --git a/docs/_includes/list-posts.html b/docs/_includes/list-posts.html new file mode 100644 index 000000000..0dd91fa9b --- /dev/null +++ b/docs/_includes/list-posts.html @@ -0,0 +1,10 @@ +{% for post in include.posts %} +
  • +

    {{ post.title }}

    + {% include post-title.html post=post %} +
    + {{ post.excerpt }} + +
    +
  • +{% endfor %} diff --git a/docs/_includes/navigation.html b/docs/_includes/navigation.html new file mode 100644 index 000000000..19153d821 --- /dev/null +++ b/docs/_includes/navigation.html @@ -0,0 +1,15 @@ + diff --git a/docs/_includes/post-title.html b/docs/_includes/post-title.html new file mode 100644 index 000000000..ce63dfee8 --- /dev/null +++ b/docs/_includes/post-title.html @@ -0,0 +1,10 @@ +

    + {% for category in include.post.categories %} + + {{ category | capitalize }} + + {% endfor %} + {% if include.post.date %} + + {% endif %} +

    diff --git a/docs/_includes/relative-src.html b/docs/_includes/relative-src.html new file mode 100644 index 000000000..fb32308f5 --- /dev/null +++ b/docs/_includes/relative-src.html @@ -0,0 +1 @@ +{% assign prefix = include.src | slice: 0, 2 %}{% assign protocol = include.src | slice: 0, 4 %}{% unless protocol == 'http' or prefix == "//" %}{{ site.baseurl }}{% endunless %}{{ include.src }} diff --git a/docs/_includes/social-icon.html b/docs/_includes/social-icon.html new file mode 100644 index 000000000..c4d125ba9 --- /dev/null +++ b/docs/_includes/social-icon.html @@ -0,0 +1,20 @@ +{% case include.icon %} + {% when "Facebook" %} + + {% when "Instagram" %} + + {% when "LinkedIn" %} + + {% when "Pinterest" %} + + {% when "Tumblr" %} + + {% when "Twitter" %} + + {% when "YouTube" %} + + {% when "RSS" %} + + {% when "GitHub" %} + +{% endcase %} diff --git a/docs/_layouts/archive.html b/docs/_layouts/archive.html new file mode 100644 index 000000000..4e8073ce3 --- /dev/null +++ b/docs/_layouts/archive.html @@ -0,0 +1,23 @@ +--- +layout: page +--- + +
    + {% assign blog = site.pages | where: "path", "blog/index.html" | first %} + + {% if blog.title %} +

    {{ blog.title }}

    + {% endif %} + + {% if blog.description %} +

    {{ blog.description }}

    + {% endif %} + + + +
      + {% include list-posts.html posts=page.posts %} +
    +
    diff --git a/docs/_layouts/default.html b/docs/_layouts/default.html new file mode 100644 index 000000000..19664e4a8 --- /dev/null +++ b/docs/_layouts/default.html @@ -0,0 +1,58 @@ + + + + + + + + {% seo %} + {% feed_meta %} + + + + + {% if jekyll.environment == 'production' and site.google_analytics_key != '' %} + + + {% endif %} + + +
    +
    +
    Hydra Logo
    + {% include navigation.html %} +
    +
    + {{ content }} + + + + diff --git a/docs/_layouts/page.html b/docs/_layouts/page.html new file mode 100644 index 000000000..1a1c7373c --- /dev/null +++ b/docs/_layouts/page.html @@ -0,0 +1,18 @@ +--- +layout: default +--- +
    +
    +
    + {% if page.heading %} +

    {{ page.heading }}

    + {% elsif page.title and page.layout != 'archive' %} +

    {{ page.title }}

    + {% endif %} + {% if page.description %} +

    {{ page.description }}

    + {% endif %} + {{ content }} +
    +
    +
    diff --git a/docs/_layouts/post.html b/docs/_layouts/post.html new file mode 100644 index 000000000..3617143dc --- /dev/null +++ b/docs/_layouts/post.html @@ -0,0 +1,37 @@ +--- +layout: page +--- +
    + {% include post-title.html post=page %} +
    + {{ content }} + +
    + {% if page.previous.url %} + + {% endif %} + {% if page.next.url %} + + {% endif %} +
    + + {% if site.disqus_shortname and page.comments %} +
    + + + {% endif %} +
    +
    diff --git a/docs/_sass/blog.scss b/docs/_sass/blog.scss new file mode 100644 index 000000000..cbae2baa7 --- /dev/null +++ b/docs/_sass/blog.scss @@ -0,0 +1,125 @@ +.blog-posts { + list-style: none; + padding: 0; + + li { + margin: 100px 0; + } +} + +.blog-post { + .author { + padding: 30px 0 0 0; + border: 1px solid #eee; + margin: 30px 0; + font-size: .8em; + + .square-image { + width: 125px; + height: 125px; + margin-top: 0; + } + .blurb { + text-align: center; + } + } + + h3 { + margin: 0; + a { + color: #000; + text-decoration: none; + font-weight: normal; + font-size: 1.3em; + } + } + + h2 { + text-align: left; + } + + .blog-navigation { + font-size: 14px; + display: block; + width: auto; + overflow: hidden; + a { + display: block; + width: 50%; + float: left; + margin: 1em 0; + } + + .next { + text-align: right; + } + } + + .post-details { + border-bottom: 1px solid #eee; + font-size: .9em; + + .blog-filter { + display: inline-block; + text-align: left; + + a { + position: relative; + top: -5px; + } + } + + a { + text-decoration: none; + } + + .post-date { + float: right; + } + + &:after { + content: ""; + display: table; + clear: both; + } + } + + .post-content { + .button { + margin: 30px 0 0 0; + } + } +} + +.pagination { + text-align: center; +} + +.blog-filter { + text-align: center; + a { + background: #eee; + padding: 3px 5px; + font-size: .8em; + border-radius: 5px; + color: #888; + transition: .2s ease-in-out; + + &:hover { + color: #555; + text-decoration: none; + } + } +} + +.blog-filter.cross a { + padding-right: 8px; + + &:after { + content: "x"; + font-size: .5em; + position: relative; + bottom: 4px; + right: -3px; + } +} diff --git a/docs/_sass/cloudcannon.scss b/docs/_sass/cloudcannon.scss new file mode 100644 index 000000000..0c27b69c0 --- /dev/null +++ b/docs/_sass/cloudcannon.scss @@ -0,0 +1,37 @@ +.editor-link, .nav-open nav .editor-link { + display: none; + margin-top: 0; + + .btn { + border: 0; + border-radius: 2px; + width: 100%; + 
max-width: 500px; + box-sizing: border-box; + font-size: 2rem; + text-decoration: none; + padding: 10px 15px; + margin: 0; + font-size: 18px; + } + + nav &, .btn { + cursor: pointer; + background-color: #f7e064; + color: #333; + box-shadow: 1px 1px 5px 0 rgba(0, 0, 0, 0.2); + + &:hover { + background-color: #f4d525; + color: #333; + } + } +} + +.cms-editor-active .editor-link { + display: block; +} + +.cms-editor-active nav .editor-link { + display: inline; +} diff --git a/docs/_sass/contact.scss b/docs/_sass/contact.scss new file mode 100644 index 000000000..dbf6d0659 --- /dev/null +++ b/docs/_sass/contact.scss @@ -0,0 +1,19 @@ +.map { + width: 100%; + margin: 100px 0; + height: 400px; +} + +.contact-box { + max-width: 750px; + margin: 0 auto; + text-align: center; + + form { + width: 100% + } + + p { + margin: 0; + } +} diff --git a/docs/_sass/developer.scss b/docs/_sass/developer.scss new file mode 100644 index 000000000..b65022ce5 --- /dev/null +++ b/docs/_sass/developer.scss @@ -0,0 +1,27 @@ + + +hr { + margin-top: 10px; + width: 100%; + } +table { + border-collapse: collapse; + width: 100%; + } +th, td { + border: 1px solid black; + padding: 8px; + text-align: left; + } +.hidden { + display: none; + } +.clickable { + cursor: pointer; + padding: 0.2em; + } +.clickable:hover { + color: #D3163C; + background-color: #CCCCCC; + padding: 0.2em; + } \ No newline at end of file diff --git a/docs/_sass/elements.scss b/docs/_sass/elements.scss new file mode 100644 index 000000000..a55f74140 --- /dev/null +++ b/docs/_sass/elements.scss @@ -0,0 +1,56 @@ +html { + background: #2b2b40; +} + +html, body { + margin: 0; + padding: 0; +} + +body { + font-family: "San Francisco", "Helvetica Neue", "Helvetica", "Arial"; + word-wrap:break-word; +} + +table { + overflow-x: scroll; + display:block; +} + +a { + color: #00a4ca; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +h1 strong, h2 strong { + font-weight: 700; +} + +h1 { + font-weight: 300; + 
font-size: 2.3em; + margin: 0; +} + +h2 { + font-weight: 300; + font-size: 2.2em; + margin: 0 0 13px 0; +} + +h3 { + margin: 20px 0 10px 0; +} + + +p, address { + font-size: 1.1em; + color: #666; + margin-bottom: 20px; + font-weight: 300; + line-height: 1.4em; +} diff --git a/docs/_sass/footer.scss b/docs/_sass/footer.scss new file mode 100644 index 000000000..2a2968551 --- /dev/null +++ b/docs/_sass/footer.scss @@ -0,0 +1,122 @@ +.footer-links { + width: 100%; + margin: 10px; + padding: 0; + + @media #{$tablet} { + -webkit-flex: 1 0 180px; + flex: 1 0 180px; + } + + li { + list-style: none; + margin: 15px auto; + + @media #{$tablet} { + max-width: 150px; + } + a { + + &:hover { + text-decoration: none; + } + svg { + fill: #999; + margin-right: 10px; + transition: fill 0.2s ease; + vertical-align: middle; + position: relative; + top: -2px; + width: 22px; + height: 22px; + } + + &:hover svg { + fill: #fff; + } + + &.twitter-icon:hover svg { + fill: #55acee; + } + + &.google-plus-icon:hover svg { + fill: #db4437; + } + + &.youtube-icon:hover svg { + fill: #cd201f; + } + + &.instagram-icon:hover svg { + fill: #f167f5; + } + + &.linkedin-icon:hover svg { + fill: #0077b5; + } + + &.pinterest-icon:hover svg { + fill: #bd081c; + } + + &.rss-icon:hover svg { + fill: #f26522; + } + } + } +} + +footer { + text-align: center; + padding: 50px 0 50px 0; + font-size: 1.1em; + position: relative; + background: $footer-color; + color: #fff; + + .copyright { + font-size: .8em; + margin: 0 auto; + + @media #{$tablet} { + text-align: center; + } + + } + + &, + a { + color: #999; + } + + h2 { + font-size: 1.4em; + margin: 30px 0; + color: #ccc; + } + + .footer-columns { + @extend %flexbox; + @include flex-flow(wrap); + margin: -10px -10px 10px -10px; + } + + a { + text-decoration: none; + + &:hover { + color: #fff; + } + } + + .legal-line { + width: 100%; + padding: 30px 0; + margin: 0; + background-color: #222527; + + a { + font-weight: 600; + } + } +} diff --git 
a/docs/_sass/forms.scss b/docs/_sass/forms.scss new file mode 100644 index 000000000..0011e06d3 --- /dev/null +++ b/docs/_sass/forms.scss @@ -0,0 +1,67 @@ +.button a, input[type=submit] { + color: #fff; + text-decoration: none; + padding: 10px 30px; + background: $brand-color; + border-radius: 3px; + border: 1px solid rgba(255,255,255,.5); + transition: .2s ease-in-out; +} + +.button a:hover, input[type=submit]:hover { + border: 1px solid #fff; + background: $secondary-brand-color; + cursor: pointer; +} + +.button.alt a { + background: rgba(255,255,255,0.15); + border-radius: 3px; + border: 1px solid rgba(255, 255, 255, 0.3); + padding: 16px 50px; +} + +.button.alt a:hover { + background: #fff; + color: $brand-color; +} + +textarea, input, button, select { font-family: inherit; font-size: inherit; } + +input[type=submit] { + margin: 20px 0 0 0; +} + +label, input, textarea { + display: block; + width: 100%; + box-sizing: border-box; +} + +textarea { + resize: vertical; + height: 150px; +} + +label { + margin: 20px 0 5px 0; +} + +input, textarea { + padding: 10px; + font-size: 1em; +} + +input, textarea { + -webkit-transition: all 0.30s ease-in-out; + -moz-transition: all 0.30s ease-in-out; + -ms-transition: all 0.30s ease-in-out; + -o-transition: all 0.30s ease-in-out; + outline: none; + border: 1px solid #DDDDDD; +} + +input[type=text]:focus, input[type=email]:focus, input[type=password]:focus, textarea:focus { + box-shadow: 0 0 5px rgba(81, 203, 238, 1); + border: 1px solid rgba(81, 203, 238, 1); +} diff --git a/docs/_sass/get_options.scss b/docs/_sass/get_options.scss new file mode 100644 index 000000000..ba6831155 --- /dev/null +++ b/docs/_sass/get_options.scss @@ -0,0 +1,8 @@ + + +section span { + background-color: #eee; + border: 1px solid #999; + display: block; + padding: 20px; + } \ No newline at end of file diff --git a/docs/_sass/how.scss b/docs/_sass/how.scss new file mode 100644 index 000000000..41e290cb7 --- /dev/null +++ b/docs/_sass/how.scss @@ -0,0 
+1,28 @@ + + +section span { + background-color: #eee; + border: 1px solid #999; + display: block; + padding: 20px; + margin-bottom: 10px; + } + +p { + font-weight: normal; + margin: 0px; + } + +hr { + margin-top: 10px; + width: 100%; + } +ul, ol { + margin: 0.25em 0 0 0; + + } + +h2 { + font-size: 1.5em; + margin: 0.5em 0 0.5em 0; + } \ No newline at end of file diff --git a/docs/_sass/landing-page.scss b/docs/_sass/landing-page.scss new file mode 100644 index 000000000..581b6978c --- /dev/null +++ b/docs/_sass/landing-page.scss @@ -0,0 +1,63 @@ +.bottom-cta { + background: linear-gradient(to bottom, $brand-color 0%, $middle-gradient-color 100%); + color: #fff; + text-align: center; + margin: 0; + padding: 100px 0; + + h2 { + margin-bottom: 50px; + } +} + +.testimonial { + background: #f5f5f5; + margin: 0; + padding: 100px 0; + + .testimonial-block { + max-width: 750px; + width: 98%; + margin: 0 auto; + + @media #{$tablet} { + @include flexbox; + + blockquote { + -webkit-flex: 1; + flex: 1; + } + } + } +} + +.hero { + color: #ffffff; + text-align: center; + background: linear-gradient(to bottom, $middle-gradient-color 0%, $secondary-brand-color 100%) no-repeat #a05fb7; + padding-top: 50px; + + p { + color: #fff; + } +} + + + +@media #{$desktop} { + .flex { + @include flexbox; + align-items: center; + flex-direction: row; + + .text, .image { + -webkit-flex: 1; + flex: 1; + padding: 0 20px; + } + } + + .content section:nth-child(even) .flex { + flex-direction: row-reverse; + } +} diff --git a/docs/_sass/layout.scss b/docs/_sass/layout.scss new file mode 100644 index 000000000..8467ba8e4 --- /dev/null +++ b/docs/_sass/layout.scss @@ -0,0 +1,174 @@ +.container, .text-container { + margin: 0 auto; + position: relative; + padding: 0 20px; +} + +.text-container { + max-width: 750px; +} + +.container { + max-width: 1140px; + + &.max-container { + max-width: 100%; + padding: 0; + } +} + +header { + color: #fff; + padding: 20px 0; + background: $brand-color; /* Old browsers 
*/ + background: linear-gradient(to bottom, $brand-color 0%, $middle-gradient-color 100%) no-repeat $brand-color; + + a { + color: #fff; + text-decoration: none; + z-index: 1; + position: relative; + + &:hover { + text-decoration: none; + } + } + + .company-name { + font-size: 1.7em; + line-height: 0; + + a { + display: inline-block; + } + + img { + display: block; + width: auto; + } + } +} + +.content { + background: #fff; + padding: 1px 0 0 0; + position: relative; +} + +.screenshot{ + max-width: 100%; + height: auto; + display: block; + box-shadow: 0 1px 0 #ccc, 0 1px 0 1px #eee; + border-radius: 2px; + margin-left: auto; + margin-right: auto; + background: #DDD url('data:image/svg+xml,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20width%3D%2244%22%20height%3D%2212%22%20viewBox%3D%220%200%2044%2012%22%3E%3Ccircle%20cx%3D%226%22%20cy%3D%226%22%20r%3D%224%22%20fill%3D%22%23eee%22%20%2F%3E%3Ccircle%20cx%3D%2222%22%20cy%3D%226%22%20r%3D%224%22%20fill%3D%22%23eee%22%20%2F%3E%3Ccircle%20cx%3D%2238%22%20cy%3D%226%22%20r%3D%224%22%20fill%3D%22%23eee%22%20%2F%3E%3C%2Fsvg%3E') 4px 4px no-repeat; + padding: 20px 0 0 0; + position: relative; +} + +section { + padding: 100px 0; +} + +section + section { + padding-top: 0; +} + +.subtext { + margin-top: 10px; + text-align: center; +} + + +.cta { + margin: 60px 0; +} + +.page h2 { + text-align: center; +} + +blockquote { + padding: 18px 25px; + margin: 0; + quotes: "\201C""\201D""\2018""\2019"; + font-style: italic; + + .author { + display: block; + font-weight: bold; + margin: 10px 0 0 0; + font-size: .85em; + font-style: normal; + } + + p { + display: inline; + } +} + +blockquote:before { + color: #ccc; + content: open-quote; + font-size: 4em; + line-height: 0.1em; + margin-right: 0.25em; + vertical-align: -0.4em; +} + +.square-image { + width: 150px; + height: 150px; + overflow: hidden; + margin: 25px auto 0 auto; + position: relative; + border-radius: 200px; + + img { + position: absolute; + left: -1000%; + 
right: -1000%; + top: -1000%; + bottom: -1000%; + margin: auto; + width: 300px; + } +} + +.page { + margin-bottom: 0; + padding-bottom: 80px; +} + +.center-text { + text-align: center; +} + +.editor-link { + display: none; + margin-top: 0; + .btn { + border: 0; + border-radius: 2px; + width: 100%; + max-width: 500px; + box-sizing: border-box; + font-size: 2rem; + text-decoration: none; + padding: 10px 15px; + margin: 0; + font-size: 18px; + cursor: pointer; + background-color: #f7e064; + color: #333; + box-shadow: 1px 1px 5px 0 rgba(0, 0, 0, 0.2); + + &:hover { + background-color: #f4d525; + color: #333; + } + } + +} diff --git a/docs/_sass/mixins/columns.scss b/docs/_sass/mixins/columns.scss new file mode 100644 index 000000000..010eae984 --- /dev/null +++ b/docs/_sass/mixins/columns.scss @@ -0,0 +1,5 @@ +@mixin columns($value) { + columns: $value; + -webkit-columns: $value; + -moz-columns: $value; +} diff --git a/docs/_sass/mixins/flexbox.scss b/docs/_sass/mixins/flexbox.scss new file mode 100644 index 000000000..92a03fd2b --- /dev/null +++ b/docs/_sass/mixins/flexbox.scss @@ -0,0 +1,394 @@ +// Flexbox Mixins +// http://philipwalton.github.io/solved-by-flexbox/ +// https://github.com/philipwalton/solved-by-flexbox +// +// Copyright (c) 2013 Brian Franco +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. 
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +// +// This is a set of mixins for those who want to mess around with flexbox +// using the native support of current browsers. For full support table +// check: http://caniuse.com/flexbox +// +// Basically this will use: +// +// * Fallback, old syntax (IE10, mobile webkit browsers - no wrapping) +// * Final standards syntax (FF, Safari, Chrome, IE11, Opera) +// +// This was inspired by: +// +// * http://dev.opera.com/articles/view/advanced-cross-browser-flexbox/ +// +// With help from: +// +// * http://w3.org/tr/css3-flexbox/ +// * http://the-echoplex.net/flexyboxes/ +// * http://msdn.microsoft.com/en-us/library/ie/hh772069(v=vs.85).aspx +// * http://css-tricks.com/using-flexbox/ +// * http://dev.opera.com/articles/view/advanced-cross-browser-flexbox/ +// * https://developer.mozilla.org/en-us/docs/web/guide/css/flexible_boxes + +//---------------------------------------------------------------------- + +// Flexbox Containers +// +// The 'flex' value causes an element to generate a block-level flex +// container box. +// +// The 'inline-flex' value causes an element to generate a inline-level +// flex container box. 
+// +// display: flex | inline-flex +// +// http://w3.org/tr/css3-flexbox/#flex-containers +// +// (Placeholder selectors for each type, for those who rather @extend) + +@mixin flexbox { + display: -webkit-box; + display: -webkit-flex; + display: -moz-flex; + display: -ms-flexbox; + display: flex; +} + +%flexbox { @include flexbox; } + +//---------------------------------- + +@mixin inline-flex { + display: -webkit-inline-box; + display: -webkit-inline-flex; + display: -moz-inline-flex; + display: -ms-inline-flexbox; + display: inline-flex; +} + +%inline-flex { @include inline-flex; } + +//---------------------------------------------------------------------- + +// Flexbox Direction +// +// The 'flex-direction' property specifies how flex items are placed in +// the flex container, by setting the direction of the flex container's +// main axis. This determines the direction that flex items are laid out in. +// +// Values: row | row-reverse | column | column-reverse +// Default: row +// +// http://w3.org/tr/css3-flexbox/#flex-direction-property + +@mixin flex-direction($value: row) { + @if $value == row-reverse { + -webkit-box-direction: reverse; + -webkit-box-orient: horizontal; + } @else if $value == column { + -webkit-box-direction: normal; + -webkit-box-orient: vertical; + } @else if $value == column-reverse { + -webkit-box-direction: reverse; + -webkit-box-orient: vertical; + } @else { + -webkit-box-direction: normal; + -webkit-box-orient: horizontal; + } + -webkit-flex-direction: $value; + -moz-flex-direction: $value; + -ms-flex-direction: $value; + flex-direction: $value; +} + // Shorter version: + @mixin flex-dir($args...) { @include flex-direction($args...); } + +//---------------------------------------------------------------------- + +// Flexbox Wrap +// +// The 'flex-wrap' property controls whether the flex container is single-line +// or multi-line, and the direction of the cross-axis, which determines +// the direction new lines are stacked in. 
+// +// Values: nowrap | wrap | wrap-reverse +// Default: nowrap +// +// http://w3.org/tr/css3-flexbox/#flex-wrap-property + +@mixin flex-wrap($value: nowrap) { + // No Webkit Box fallback. + -webkit-flex-wrap: $value; + -moz-flex-wrap: $value; + @if $value == nowrap { + -ms-flex-wrap: none; + } @else { + -ms-flex-wrap: $value; + } + flex-wrap: $value; +} + +//---------------------------------------------------------------------- + +// Flexbox Flow (shorthand) +// +// The 'flex-flow' property is a shorthand for setting the 'flex-direction' +// and 'flex-wrap' properties, which together define the flex container's +// main and cross axes. +// +// Values: | +// Default: row nowrap +// +// http://w3.org/tr/css3-flexbox/#flex-flow-property + +@mixin flex-flow($values: (row nowrap)) { + // No Webkit Box fallback. + -webkit-flex-flow: $values; + -moz-flex-flow: $values; + -ms-flex-flow: $values; + flex-flow: $values; +} + +//---------------------------------------------------------------------- + +// Flexbox Order +// +// The 'order' property controls the order in which flex items appear within +// their flex container, by assigning them to ordinal groups. +// +// Default: 0 +// +// http://w3.org/tr/css3-flexbox/#order-property + +@mixin order($int: 0) { + -webkit-box-ordinal-group: $int + 1; + -webkit-order: $int; + -moz-order: $int; + -ms-flex-order: $int; + order: $int; +} + +//---------------------------------------------------------------------- + +// Flexbox Grow +// +// The 'flex-grow' property sets the flex grow factor. Negative numbers +// are invalid. +// +// Default: 0 +// +// http://w3.org/tr/css3-flexbox/#flex-grow-property + +@mixin flex-grow($int: 0) { + -webkit-box-flex: $int; + -webkit-flex-grow: $int; + -moz-flex-grow: $int; + -ms-flex-positive: $int; + flex-grow: $int; +} + +//---------------------------------------------------------------------- + +// Flexbox Shrink +// +// The 'flex-shrink' property sets the flex shrink factor. 
Negative numbers +// are invalid. +// +// Default: 1 +// +// http://w3.org/tr/css3-flexbox/#flex-shrink-property + +@mixin flex-shrink($int: 1) { + -webkit-flex-shrink: $int; + -moz-flex-shrink: $int; + -ms-flex-negative: $int; + flex-shrink: $int; +} + +//---------------------------------------------------------------------- + +// Flexbox Basis +// +// The 'flex-basis' property sets the flex basis. Negative lengths are invalid. +// +// Values: Like "width" +// Default: auto +// +// http://www.w3.org/TR/css3-flexbox/#flex-basis-property + +@mixin flex-basis($value: auto) { + -webkit-flex-basis: $value; + -moz-flex-basis: $value; + -ms-flex-preferred-size: $value; + flex-basis: $value; +} + +//---------------------------------------------------------------------- + +// Flexbox "Flex" (shorthand) +// +// The 'flex' property specifies the components of a flexible length: the +// flex grow factor and flex shrink factor, and the flex basis. When an +// element is a flex item, 'flex' is consulted instead of the main size +// property to determine the main size of the element. If an element is +// not a flex item, 'flex' has no effect. +// +// Values: none | || +// Default: See individual properties (1 1 0). +// +// http://w3.org/tr/css3-flexbox/#flex-property + +@mixin flex($fg: 1, $fs: null, $fb: null) { + + // Set a variable to be used by box-flex properties + $fg-boxflex: $fg; + + // Box-Flex only supports a flex-grow value so let's grab the + // first item in the list and just return that. + @if type-of($fg) == 'list' { + $fg-boxflex: nth($fg, 1); + } + + -webkit-box-flex: $fg-boxflex; + -webkit-flex: $fg $fs $fb; + -moz-box-flex: $fg-boxflex; + -moz-flex: $fg $fs $fb; + -ms-flex: $fg $fs $fb; + flex: $fg $fs $fb; +} + +//---------------------------------------------------------------------- + +// Flexbox Justify Content +// +// The 'justify-content' property aligns flex items along the main axis +// of the current line of the flex container. 
This is done after any flexible +// lengths and any auto margins have been resolved. Typically it helps distribute +// extra free space leftover when either all the flex items on a line are +// inflexible, or are flexible but have reached their maximum size. It also +// exerts some control over the alignment of items when they overflow the line. +// +// Note: 'space-*' values not supported in older syntaxes. +// +// Values: flex-start | flex-end | center | space-between | space-around +// Default: flex-start +// +// http://w3.org/tr/css3-flexbox/#justify-content-property + +@mixin justify-content($value: flex-start) { + @if $value == flex-start { + -webkit-box-pack: start; + -ms-flex-pack: start; + } @else if $value == flex-end { + -webkit-box-pack: end; + -ms-flex-pack: end; + } @else if $value == space-between { + -webkit-box-pack: justify; + -ms-flex-pack: justify; + } @else if $value == space-around { + -ms-flex-pack: distribute; + } @else { + -webkit-box-pack: $value; + -ms-flex-pack: $value; + } + -webkit-justify-content: $value; + -moz-justify-content: $value; + justify-content: $value; +} + // Shorter version: + @mixin flex-just($args...) { @include justify-content($args...); } + +//---------------------------------------------------------------------- + +// Flexbox Align Items +// +// Flex items can be aligned in the cross axis of the current line of the +// flex container, similar to 'justify-content' but in the perpendicular +// direction. 'align-items' sets the default alignment for all of the flex +// container's items, including anonymous flex items. 'align-self' allows +// this default alignment to be overridden for individual flex items. (For +// anonymous flex items, 'align-self' always matches the value of 'align-items' +// on their associated flex container.) 
+// +// Values: flex-start | flex-end | center | baseline | stretch +// Default: stretch +// +// http://w3.org/tr/css3-flexbox/#align-items-property + +@mixin align-items($value: stretch) { + @if $value == flex-start { + -webkit-box-align: start; + -ms-flex-align: start; + } @else if $value == flex-end { + -webkit-box-align: end; + -ms-flex-align: end; + } @else { + -webkit-box-align: $value; + -ms-flex-align: $value; + } + -webkit-align-items: $value; + -moz-align-items: $value; + align-items: $value; +} + +//---------------------------------- + +// Flexbox Align Self +// +// Values: auto | flex-start | flex-end | center | baseline | stretch +// Default: auto + +@mixin align-self($value: auto) { + // No Webkit Box Fallback. + -webkit-align-self: $value; + -moz-align-self: $value; + @if $value == flex-start { + -ms-flex-item-align: start; + } @else if $value == flex-end { + -ms-flex-item-align: end; + } @else { + -ms-flex-item-align: $value; + } + align-self: $value; +} + +//---------------------------------------------------------------------- + +// Flexbox Align Content +// +// The 'align-content' property aligns a flex container's lines within the +// flex container when there is extra space in the cross-axis, similar to +// how 'justify-content' aligns individual items within the main-axis. Note, +// this property has no effect when the flexbox has only a single line. +// +// Values: flex-start | flex-end | center | space-between | space-around | stretch +// Default: stretch +// +// http://w3.org/tr/css3-flexbox/#align-content-property + +@mixin align-content($value: stretch) { + // No Webkit Box Fallback. 
+ -webkit-align-content: $value; + -moz-align-content: $value; + @if $value == flex-start { + -ms-flex-line-pack: start; + } @else if $value == flex-end { + -ms-flex-line-pack: end; + } @else { + -ms-flex-line-pack: $value; + } + align-content: $value; +} diff --git a/docs/_sass/navigation.scss b/docs/_sass/navigation.scss new file mode 100644 index 000000000..b2a4b62c0 --- /dev/null +++ b/docs/_sass/navigation.scss @@ -0,0 +1,86 @@ +.nav-open nav { + border-bottom: 1px dotted rgba(255, 255, 255, .2); + padding: 10px 0; + a { + display: block; + } + + @media #{$mid-point} { + border: 0; + padding: 0 20px; + + a { + display: inline; + } + } +} + +nav { + text-transform: uppercase; + font-size: .8em; + width: 100%; + + @media #{$mid-point} { + text-align: right; + position: absolute; + top: 13px; + right: 0; + padding: 0 20px; + } + + + a { + margin: 0 3px; + padding: 20px 10px; + border-bottom: 1px solid rgba(255,255,255,0); + color: rgba(255,255,255,.8); + transition: .2s ease-in-out; + display: none; + + @media #{$mid-point} { + display: inline; + padding: 10px; + } + + + &.nav-toggle { + display: inline; + position: absolute; + right: 10px; + top: -22px; + font-size: 1.9em; + border: 0; + + @media #{$mid-point} { + display: none; + } + + &:hover { + border: 0; + } + } + } + + a:hover { + + border-bottom: 1px solid rgba(255,255,255,.3); + color: #fff; + } + + @media #{$mid-point} { + a.highlight { + border: 1px #ccc solid; + border-radius: 5px; + + &:hover { + background: #fff; + color: $brand-color; + } + } + } + + a.active { + color: #fff; + } + +} diff --git a/docs/_sass/pricing.scss b/docs/_sass/pricing.scss new file mode 100644 index 000000000..19b92ed3d --- /dev/null +++ b/docs/_sass/pricing.scss @@ -0,0 +1,71 @@ +.plans { + @extend %flexbox; + @include flex-flow(wrap); + padding: 50px 0 30px 0; + + .plan { + list-style: none; + padding: 0; + margin: 0 10px 50px 10px; + text-align: center; + border: 1px solid #eee; + border-radius: 5px; + box-shadow: 0px 0px 
10px #eee; + width: 100%; + + .highlighted { + font-size: 1.2em + } + + .pricing-cta { + padding: 0; + + a { + display: block; + box-sizing: border-box; + padding: 20px 0; + border-radius: 0 0 2px 2px; + border: 0; + } + } + + @media #{$desktop} { + -webkit-flex: 1; + flex: 1; + } + + li { + border-top-right-radius: 5px; + border-top-left-radius: 5px; + padding: 20px 0; + h3 { + padding: 0; + margin: 0; + color: #fff; + font-weight: normal; + } + } + } +} + +.faq { + @media #{$desktop} { + @include columns(2); + } + color: #666; + div { + break-inside: avoid; + padding: 25px 0; + } + + dt { + font-weight: bold; + margin: 0 0 5px 0; + } + + dd { + padding: 0; + margin: 0; + + } +} diff --git a/docs/_sass/staff.scss b/docs/_sass/staff.scss new file mode 100644 index 000000000..78cc262b3 --- /dev/null +++ b/docs/_sass/staff.scss @@ -0,0 +1,38 @@ +.staff { + padding: 0; + list-style: none; + @extend %flexbox; + @include flex-flow(wrap); + text-align: center; + li { + padding: 30px 20px; + box-sizing: border-box; + width: 100%; + + @media #{$tablet} { + @include flex(1, 1, 45%); + } + + @media #{$desktop} { + @include flex(1, 1, 29%); + } + + } + + .square-image { + width: 200px; + height: 200px; + img { + border-radius: 200px; + } + } + + .name { + font-size: 1.3em; + margin-top: 20px; + } + + .position { + color: #666; + } +} diff --git a/docs/_sass/variables.scss b/docs/_sass/variables.scss new file mode 100644 index 000000000..6ce421ac2 --- /dev/null +++ b/docs/_sass/variables.scss @@ -0,0 +1,9 @@ +$brand-color: #333333; +$secondary-brand-color: #333333; +$footer-color: #000000; +$middle-gradient-color: mix($brand-color, $secondary-brand-color, 95%); + +// Breakpoints +$tablet: "(min-width: 450px)"; +$mid-point: "(min-width: 620px)"; +$desktop: "(min-width: 768px)"; diff --git a/docs/apple-touch-icon.png b/docs/apple-touch-icon.png new file mode 100644 index 000000000..65b56de52 Binary files /dev/null and b/docs/apple-touch-icon.png differ diff --git 
a/docs/blog/index.html b/docs/blog/index.html new file mode 100644 index 000000000..80ce1e326 --- /dev/null +++ b/docs/blog/index.html @@ -0,0 +1,26 @@ +--- +title: Blog +description: Keep up with the latest news. +--- + +
    + +
      + {% include list-posts.html posts=paginator.posts %} +
    + + {% if paginator.total_pages > 1 %} + + + {% endif %} +
    diff --git a/docs/css/screen.scss b/docs/css/screen.scss new file mode 100644 index 000000000..d14200c20 --- /dev/null +++ b/docs/css/screen.scss @@ -0,0 +1,19 @@ +--- +--- +@import "mixins/flexbox"; +@import "mixins/columns"; +@import "variables"; +@import "elements"; +@import "landing-page"; +@import "layout"; +@import "pricing"; +@import "staff"; +@import "contact"; +@import "blog"; +@import "forms"; +@import "navigation"; +@import "footer"; +@import "cloudcannon"; +@import "get_options"; +@import "developer"; +@import "how"; \ No newline at end of file diff --git a/docs/developer.html b/docs/developer.html new file mode 100644 index 000000000..8451a5046 --- /dev/null +++ b/docs/developer.html @@ -0,0 +1,49 @@ +--- +title: Developer info +heading: Developer info +--- +
    +
    +
    +

    The architecture of the ipfixprobe can be described by the following diagram:

    + +

    Process plugin

    +

    The ipfixprobe contains a script that creates a template for a new process plugin. To use it, follow these steps:
    +

    +

    Run the script:

    + +

    cd process

    +

    ./create_plugin.sh

    +
    + +

    To create the process plugin follow these steps:

    +
      +
    1. Add plugin_name.hpp and plugin_name.cpp files to ipfixprobe_process_src variable in Makefile.am.

    2. +
    3. Implement process plugin event functions. Don't forget to remove unused events to keep default implementation.

    4. +
    5. Set PLUGIN_NAME_UNIREC_TEMPLATE and IPFIX_PLUGIN_NAME_TEMPLATE macros to export Unirec or IPFIX data respectively.

    6. +
    7. Define Unirec and IPFIX fields to export Unirec or IPFIX respectively.

    8. +
    9. Implement fill_ipfix and fill_unirec.

    10. +
    11. Update README.md.

    12. +
    +
    +

    Process plugin events

    +

    pre_create Is called before the creation of new flow from the initial packet.

    +

    post_create is called after the flow is created, takes the newly created flow and the initial packet.

    +

    pre_update is called when incoming packet belongs to the existing flow, before the data from the packet are added to the flow.

    +

    post_update is called after the data of the packet are added to the flow.

    +

    pre_export is called right before the flow is exported.

    +
    +

    Input plugin

    +

    You can also create own input plugin.

    + +

    To create the input plugin follow these steps:

    +
      +
    1. Create plugin_name.hpp and plugin_name.cpp in the input directory.
    2. +
    3. Add plugin_name.hpp and plugin_name.cpp files to ipfixprobe_input_src variable in Makefile.am.
    4. +
    5. Create a plugin class that inherits from the Input plugin class.
    6. +
    7. Override virtual Plugin class methods (init, close, get_parser, get_name) and Input plugin method to receive new packets(get).
    8. +
    +
    +
    +
    +
    \ No newline at end of file diff --git a/docs/export.html b/docs/export.html new file mode 100644 index 000000000..a89b6735f --- /dev/null +++ b/docs/export.html @@ -0,0 +1,57 @@ +--- +title: Export data +heading: Export data +--- + + +{% assign sorted_export = site.export | sort: 'title' %} +
    +
    +
    +

    Process plugins can export data. Export format of each plugin is described in this section

    +
    + {% for export_table in sorted_export %} +
    +

    {{export_table.title}}

    + {% assign textId = export_table.title | append: "_text" %} + + {% assign tableId = export_table.title | append: "_table" %} + + + + + + + + + + + {% for row in export_table.fields %} + + + + + + + {% endfor %} + + + {% endfor %} +
    +
    +
    +
    + + diff --git a/docs/favicon.png b/docs/favicon.png new file mode 100644 index 000000000..2a03ccecf Binary files /dev/null and b/docs/favicon.png differ diff --git a/docs/get_options.html b/docs/get_options.html new file mode 100644 index 000000000..30f2855d7 --- /dev/null +++ b/docs/get_options.html @@ -0,0 +1,30 @@ +--- +title: Installation of ipfixprobe +heading: How to install ipfixprobe? +--- + +{% for option in site.get_options %} +
    +
    +
    +

    {{ option.title }}

    +

    {{ option.description }}

    +
    +
    + {%if option.instructions %} + {% for instruction in option.instructions %} +

    {{ instruction.description }}

    + + {% for line in instruction.code %} + {{line}}
    + {% endfor %} +
    + {% endfor %} + {% endif %} +
    +
    +
    +{% endfor %} +
    +Continue reading with "How to use it"... +
    diff --git a/docs/how.html b/docs/how.html new file mode 100644 index 000000000..9b7c07f6d --- /dev/null +++ b/docs/how.html @@ -0,0 +1,211 @@ +--- +title: How to use ipfixprobe +heading: ipfixprobe usage +--- + +
    +
    + This guide expects ipfixprobe is already installed, see installation step. + + The following sections describe alternative ways how to run ipfixprobe. + +
    +
    +

    Systemd service (recommended)

    +

    The ipfixprobe can be set up to be used as a daemon to continuously process incoming packets from the boot up:

    +

    Create your instance.conf configuration, and save it in /etc/ipfixprobe/
    + + All settings are explained in the example file + +

    To start the systemd service, use:

    + +

    sudo systemctl start ipfixprobe@instance.service

    +
    + + "instance" serves as Your identifier of ipfixprobe instance, it must be equal in the name of the configuration file and in the systemctl command. + +

    To enable the service at system startup, run:

    + +

    sudo systemctl enable ipfixprobe@instance.service

    +
    + +
    + +

    Command line — starting manually

    +

    The simplest way to use ipfixprobe is to process a PCAP file using the PCAP plugin (needs to be ./configured with --with-pcap)

    +
    + +

    ./ipfixprobe -s cache -i "pcap;file=pcaps/http.pcap" -o "text;m"

    +
    +

    {{ plugin.title | raw }}

    +

    {{ plugin.description | raw }}

    +
    +

    The ipfixprobe consists of one input, zero or one output, one storage and zero or more process plugins.

    + {% for plugin in site.how %} +
    +

    +

    {{ plugin.title | raw }}

    +

    {{ plugin.description | raw }}

    +
    + {% for option in plugin.options %} +
    +

    {{ option.title }}

    +

    {{ option.description }}

    + {% if plugin.options %} + + Command line parameters used by {{ option.title }} plugin:
    + {% for parameter in option.parameters %} + {{ parameter.name | raw }} : {{ parameter.description}}
    + {% endfor %} +
    + {% endif %} + {% for run in option.runs %} + +

    {{ run.explanation }}

    +

    {{ run.code }}

    +
    + {% endfor %} + {% endfor %} + {% endfor %} + +
    +

    One-time conversion of PCAP file to CSV

    +

    The ipfixprobe can be used to convert given PCAP file to the CSV containing flows from that file in the Unirec format.

    + + Requirements:
    +
      +
    • Docker or Podman
    • +
    • bash
    • +
    • which, mktemp
    • +
    +
    + + This container performs the following tasks:
    +
      +
    1. Copies a pcap file and processing script into the container
    2. +
    3. Runs the ipfixprobe tool to export flows
    4. +
    5. Logs the results in CSV format
    6. +
    +
    +

    Build

    +

    The script builds the image automatically, but be sure that Dockerfile is in the same directory.
    + To build the manually image, navigate to the directory containing the Dockerfile and run:

    + +

    docker build -t docker_ipfixprobe .

    +
    +

    Run

    + + Parameters:
    +
      +
    • process_script.sh Script for processing the pcap file inside the container
    • +
    • input_file.pcap Path to the input pcap file
    • +
    • output_file.csv Path to the output CSV file
    • +
    +
    + +

    bash ./ipfixprobe_wrapper.sh <process_script.sh> <input_file.pcap> <output_file.csv>

    +
    +

    To process a file ../pcaps/mixed.pcap using a processing script process_script.sh and output the results to output.csv, use the following wrapper script:

    + +

    bash ./ipfixprobe_wrapper.sh ./process_script.sh ../pcaps/mixed.pcap ./output.csv

    +
    + +
    +

    OpenWrt / Turris

    +

    Create and save the configuration file to the /etc/config/ipfixprobe. Example of configuration file:

    + +

    NEMEA ipfixprobe + # Copyright (C) 2022-2023 CESNET + + # Available options for profiles, 'list' options can be used repeatedly: + # list interfaces - list of NIC, e.g., eth0, enp0s1, ... + # list plugins - list of plugin names, see 'ipfixprobe -h process' for help + # ipfix_host - address of IPFIX collector + # ipfix_port - port of IPFIX collector, default: 4739 + # ipfix_udp - 1 to export to IPFIX collector via UDP, 0 via TCP + # cache_size - size of flow cache as exponent of 2, default: 1024 + # cache_line - size of flow cache line as exponent of 2, default: 4 + # active_timeout - active timeout in seconds, default: 300 + # inactive_timeout - inactive timeout in seconds, default: 30 + # link - unsigned integer as identification of link/router + # dir - unsigned integer as identification of direction/NIC + # split_biflow - 1 to split biflow to uniflow, default: 0 to use biflow + # ipfix_mtu - size of max transmission unit (MTU), default: 1452 + # + # respawn - enable respawn of crashed process + # respawn_threshold - timeout in seconds for restarting a service after it closes + # respawn_timeout - max time in seconds to wait for a process respawn to complete + # respawn_retry - max number of attempts to respawn before giving up, 0 means newer stop trying to respawn + # core - size of coredump, '0' - not generate, 'unlimited' - unlimited size + # + # enabled - 1 to enable start daemon instance for that profile, NOTE: if profile is directly specified for start script + # (example: '/etc/init.d/ipfixprobe start wan profileX profileY lan'), this option is ignored + + config profile 'lan' + option enabled '0' + list interfaces 'br-lan' + list plugins 'basicplus' + list plugins 'dns' + list plugins 'http' + list plugins 'pstats' + list plugins 'ovpn' + list plugins 'wg' + list plugins 'dnssd;txt' + list plugins 'ssdp' + list plugins 'tls' + list plugins 'quic' + option ipfix_host '127.0.0.1' + option ipfix_port '4739' + option ipfix_udp '1' + option link '1' + 
option dir '1'

    +
    +

    Prepare the init script:

    + +

    wget https://raw.githubusercontent.com/CESNET/Nemea-OpenWRT/master/net/ipfixprobe/files/init.d/ipfixprobe

    +

    sudo mv ipfixprobe /etc/init.d/ipfixprobe

    +
    +

    To run the script manually use:

    + +

    /etc/init.d/ipfixprobe start

    +
    +

    To start the service at system startup:

    + +

    /etc/init.d/ipfixprobe enable

    +
    +
    +
    +
    +
    + diff --git a/docs/images/_screenshot.png b/docs/images/_screenshot.png new file mode 100644 index 000000000..3cbd8be8f Binary files /dev/null and b/docs/images/_screenshot.png differ diff --git a/docs/images/dashboard.png b/docs/images/dashboard.png new file mode 100644 index 000000000..c10df9fb2 Binary files /dev/null and b/docs/images/dashboard.png differ diff --git a/docs/images/datacenter.jpeg b/docs/images/datacenter.jpeg new file mode 100644 index 000000000..4d54d9180 Binary files /dev/null and b/docs/images/datacenter.jpeg differ diff --git a/docs/images/github.png b/docs/images/github.png new file mode 100644 index 000000000..f121cbe5c Binary files /dev/null and b/docs/images/github.png differ diff --git a/docs/images/ipfixprobe-horizontal.svg b/docs/images/ipfixprobe-horizontal.svg new file mode 100644 index 000000000..fbb88f0a3 --- /dev/null +++ b/docs/images/ipfixprobe-horizontal.svg @@ -0,0 +1,313 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/images/ipfixprobe_architecture.jpg b/docs/images/ipfixprobe_architecture.jpg new file mode 100644 index 000000000..bdfb4bc3f Binary files /dev/null and b/docs/images/ipfixprobe_architecture.jpg differ diff --git a/docs/images/logo.svg b/docs/images/logo.svg new file mode 100644 index 000000000..b31aa32c6 --- /dev/null +++ b/docs/images/logo.svg @@ -0,0 +1,310 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/images/network.jpg b/docs/images/network.jpg new file mode 100644 index 000000000..c4a12d5db Binary files /dev/null and b/docs/images/network.jpg differ diff --git a/docs/images/router.jpeg b/docs/images/router.jpeg new file mode 100644 index 000000000..afb74f0ac Binary files /dev/null and b/docs/images/router.jpeg differ diff --git 
a/docs/index.html b/docs/index.html new file mode 100644 index 000000000..bed2167da --- /dev/null +++ b/docs/index.html @@ -0,0 +1,51 @@ +--- +title: ipfixprobe +description: free and high-performance flow monitoring probe! +--- +
    +
    +

    High-performance flow monitoring probe.

    +

    Monitor and analyze your network traffic for free!

    + +
    +
    + +
    +
    +
    +
    +

    Does ipfixprobe support small routers?

    +

    The ipfixprobe exporter can be deployed to small SOHO routers with OpenWRT operating systems. In case of Turris routers, ipfixprobe is available in its standard repositories.

    +
    + +
    +
    + +
    +
    +
    +

    How shall I deploy ipfixprobe on my network?

    +

    The ipfixprobe supports multiple inputs. From the high-speed DPDK or NDP inputs produced by specialized 100 Gbps network monitoring cards, + to slower interfaces such as libpcap that is supported in any operating system.

    +
    + +
    +
    + +
    +
    +
    +

    How can I get the data?

    +

    The ipfixprobe supports multiple output plugins. From the basic ones, where flow data are dumped to standard output in the form of text, to the standard IPFIX format, that can be used together with IPFIX collectors such as ipfixcol2, which enables multiple additional format outputs. Just check out the documentation to learn how to use ipfixprobe and ipfixcol2 together with systems like Apache Kafka or ELK.

    +
    + +
    +
    + + + +
    +

    Let's monitor your network now!

    + +
    +
    diff --git a/docs/robots.txt b/docs/robots.txt new file mode 100644 index 000000000..413700291 --- /dev/null +++ b/docs/robots.txt @@ -0,0 +1,6 @@ +--- +layout: null +sitemap: false +--- +User-agent: * +Sitemap: {{ site.url }}/sitemap.xml diff --git a/docs/siteicon.svg b/docs/siteicon.svg new file mode 100644 index 000000000..a133ac66c --- /dev/null +++ b/docs/siteicon.svg @@ -0,0 +1,267 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/touch-icon.png b/docs/touch-icon.png new file mode 100644 index 000000000..5af55cd95 Binary files /dev/null and b/docs/touch-icon.png differ diff --git a/include/ipfixprobe/cttmeta.hpp b/include/ipfixprobe/cttmeta.hpp new file mode 100644 index 000000000..6783b9ef4 --- /dev/null +++ b/include/ipfixprobe/cttmeta.hpp @@ -0,0 +1,81 @@ +#ifndef IPXP_INPUT_CTT_HPP +#define IPXP_INPUT_CTT_HPP + +#include +#include + +namespace ipxp { + +enum CsumStatus : uint8_t { + CSUM_UNKNOWN = 0x0, ///< No information about the checksum + CSUM_BAD = 0x1, ///< The checksum in the packet is wrong + CSUM_GOOD = 0x2, ///< The checksum in the packet is valid + CSUM_NONE = 0x3 ///< Checksum not correct but header integrity verified +}; + +enum ParserStatus : uint8_t { + PA_OK = 0x0, ///< Parsing completed successfully + PA_UNKNOWN = 0x1, ///< Parser stopped at an unknown protocol + PA_LIMIT = 0x2, ///< Parser stopped at its own limit (e.g., VLAN=4) + PA_ERROR = 0x3 ///< Error in protocol header or parsing overflow +}; + +enum L2PType : uint8_t { + L2_UNKNOWN = 0x0, ///< Unknown L2 protocol + L2_ETHER_IP = 0x1, ///< Ethernet with IP payload + L2_ETHER_TIMESYNC = 0x2, ///< Ethernet with TimeSync protocol + L2_ETHER_ARP = 0x3, ///< Ethernet with ARP protocol + L2_ETHER_LLDP = 0x4, ///< Ethernet with LLDP protocol + L2_ETHER_NSH = 0x5, ///< Ethernet with NSH protocol + L2_ETHER_VLAN = 0x6, ///< Ethernet with VLAN tagging + L2_ETHER_QINQ = 0x7, ///< Ethernet 
with QinQ tagging + L2_ETHER_PPPOE = 0x8, ///< Ethernet with PPPoE encapsulation + L2_ETHER_FCOE = 0x9, ///< Ethernet with FCoE protocol + L2_ETHER_MPLS = 0xA ///< Ethernet with MPLS +}; + +enum L3PType : uint8_t { + L3_UNKNOWN = 0x0, ///< Unknown L3 protocol + L3_IPV4 = 0x1, ///< IPv4 protocol + L3_IPV4_EXT = 0x3, ///< IPv4 with extensions + L3_IPV6 = 0x4, ///< IPv6 protocol + L3_IPV6_EXT = 0xC ///< IPv6 with extensions +}; + +enum L4PType : uint8_t { + L4_UNKNOWN = 0x0, ///< Unknown L4 protocol + L4_TCP = 0x1, ///< TCP protocol + L4_UDP = 0x2, ///< UDP protocol + L4_FRAG = 0x3, ///< Fragmented packet + L4_SCTP = 0x4, ///< SCTP protocol + L4_ICMP = 0x5, ///< ICMP protocol + L4_NONFRAG = 0x6, ///< Non-fragmented packet + L4_IGMP = 0x7 ///< IGMP protocol +}; + +struct Metadata_CTT { + struct timeval ts; ///< Timestamp; invalid if all bits are 1 + uint16_t vlan_tci; ///< VLAN Tag Control Information from outer VLAN + bool vlan_vld : 1; ///< VLAN valid flag; indicates if VLAN TCI is valid + bool vlan_stripped : 1; ///< VLAN stripped flag; outer VLAN only + CsumStatus ip_csum_status : 2; ///< IP checksum status + CsumStatus l4_csum_status : 2; ///< Layer 4 checksum status + ParserStatus parser_status : 2;///< Final state of FPGA parser + uint8_t ifc; ///< Interface (IFC) number + uint16_t filter_bitmap; ///< Filter bitmap; each filter rule can have several mark bits + bool ctt_export_trig : 1; ///< CTT flag; packet triggered export in CTT + bool ctt_rec_matched : 1; ///< CTT flag; packet matched record in CTT + bool ctt_rec_created : 1; ///< CTT flag; packet created record in CTT + bool ctt_rec_deleted : 1; ///< CTT flag; packet deleted record in CTT + uint64_t flow_hash; ///< Flow hash; not the same as RSS hash + uint8_t l2_len : 7; ///< Length of the L2 layer, if known + uint16_t l3_len : 9; ///< Length of the L3 layer, if known + uint8_t l4_len : 8; ///< Length of the L4 layer, if known + L2PType l2_ptype : 4; ///< Type of the L2 layer + L3PType l3_ptype : 4; ///< 
Type of the L3 layer + L4PType l4_ptype : 4; ///< Type of the L4 layer +}; + +} + +#endif // IPXP_INPUT_CTT_HPP \ No newline at end of file diff --git a/include/ipfixprobe/flowifc.hpp b/include/ipfixprobe/flowifc.hpp index 2014f9a27..6d589adeb 100644 --- a/include/ipfixprobe/flowifc.hpp +++ b/include/ipfixprobe/flowifc.hpp @@ -38,6 +38,7 @@ #include #include #include +#include #ifdef WITH_NEMEA #include @@ -49,6 +50,7 @@ #include #include "ipaddr.hpp" #include +#include namespace ipxp { @@ -247,7 +249,29 @@ struct Record { * \brief Flow record struct constaining basic flow record data and extension headers. */ struct Flow : public Record { - uint64_t flow_hash; + static inline const int MAXIMAL_PROCESS_PLUGIN_COUNT = 64; + /** + * \brief Plugins status struct describes flow information required by process plugins. + */ + struct PluginsStatus { + // get_no_data[i] == true -> i-th process plugin requires no flow data + // get_no_data[i] == false && get_all_data[i] == true -> i-th process plugin requires all + // available flow data + // get_no_data[i] == false && get_all_data[i] == false -> i-th process plugin requires + // only metadata + std::bitset get_all_data; + std::bitset get_no_data; + }; + + uint64_t flow_hash; + + #ifdef WITH_CTT + uint64_t flow_hash_ctt; /**< Flow hash for CTT. */ + #endif + + PluginsStatus plugins_status; /**< Statuses of the process plugins for this flow, used to check + if the flow process plugins requires all available data, only + metadata or nothing of this. 
*/ struct timeval time_first; struct timeval time_last; @@ -272,4 +296,5 @@ struct Flow : public Record { }; } + #endif /* IPXP_FLOWIFC_HPP */ diff --git a/include/ipfixprobe/input.hpp b/include/ipfixprobe/input.hpp index aa5c09359..875d81516 100644 --- a/include/ipfixprobe/input.hpp +++ b/include/ipfixprobe/input.hpp @@ -68,6 +68,12 @@ class InputPlugin : public TelemetryUtils, public Plugin std::shared_ptr plugin_dir, std::shared_ptr queues_dir); +#ifdef WITH_CTT + virtual std::pair get_ctt_config() const { + throw PluginError("CTT is not supported by this input plugin"); + } +#endif /* WITH_CTT */ + protected: virtual void configure_telemetry_dirs( std::shared_ptr plugin_dir, diff --git a/include/ipfixprobe/ipfix-elements.hpp b/include/ipfixprobe/ipfix-elements.hpp index 0f4d1f0af..666ccd110 100644 --- a/include/ipfixprobe/ipfix-elements.hpp +++ b/include/ipfixprobe/ipfix-elements.hpp @@ -51,9 +51,8 @@ namespace ipxp { /** * Conversion from microseconds to NTP fraction (resolution 1/(2^32)s, ~233 picoseconds). - * Division by 1000000 would lead to wrong value when converting fraction back to microseconds, so 999999 is used. */ -#define NTP_USEC_TO_FRAC(usec) (uint32_t)(((uint64_t) usec << 32) / 999999) +#define NTP_USEC_TO_FRAC(usec) (uint32_t)(((uint64_t) usec << 32) / 1000000) /** * Create 64 bit NTP timestamp which consist of 32 bit seconds part and 32 bit fraction part. 
diff --git a/include/ipfixprobe/packet.hpp b/include/ipfixprobe/packet.hpp index 5b9e40cf9..81907be8d 100644 --- a/include/ipfixprobe/packet.hpp +++ b/include/ipfixprobe/packet.hpp @@ -38,6 +38,7 @@ #include #include +#include namespace ipxp { @@ -45,7 +46,7 @@ namespace ipxp { * \brief Structure for storing parsed packet fields */ struct Packet : public Record { - struct timeval ts; + timeval ts; uint8_t dst_mac[6]; uint8_t src_mac[6]; @@ -101,6 +102,10 @@ struct Packet : public Record { uint16_t buffer_size; /**< Size of buffer */ bool source_pkt; /**< Direction of packet from flow point of view */ +#ifdef WITH_CTT + Metadata_CTT cttmeta; /**< Metadata from CTT */ + bool cttmeta_valid; /**< True if CTT metadata is valid */ +#endif /* WITH_CTT */ /** * \brief Constructor. @@ -118,6 +123,9 @@ struct Packet : public Record { custom(nullptr), custom_len(0), buffer(nullptr), buffer_size(0), source_pkt(true) +#ifdef WITH_CTT + ,cttmeta_valid(false) +#endif /* WITH_CTT */ { } }; diff --git a/include/ipfixprobe/process.hpp b/include/ipfixprobe/process.hpp index b538767f6..2a2e24afa 100644 --- a/include/ipfixprobe/process.hpp +++ b/include/ipfixprobe/process.hpp @@ -38,85 +38,103 @@ #include "flowifc.hpp" namespace ipxp { - -/** - * \brief Tell storage plugin to flush (immediately export) current flow. - * Behavior when called from post_create, pre_update and post_update: flush current Flow and erase FlowRecord. - */ -#define FLOW_FLUSH 0x1 - -/** - * \brief Tell storage plugin to flush (immediately export) current flow. - * Behavior when called from post_create: flush current Flow and erase FlowRecord. - * Behavior when called from pre_update and post_update: flush current Flow, erase FlowRecord and call post_create on packet. - */ -#define FLOW_FLUSH_WITH_REINSERT 0x3 - /** * \brief Class template for flow cache plugins. 
*/ class ProcessPlugin : public Plugin { public: - ProcessPlugin() {} - virtual ~ProcessPlugin() {} - virtual ProcessPlugin *copy() = 0; + enum FlowAction : int { + /** + * \brief Tell storage plugin that process plugin requires all incoming data for given flow. + */ + GET_ALL_DATA = 0, + /** + * \brief Tell storage plugin that process plugin requires only metadata. + * Used to offload the cache when all process plugin return GET_METADATA. + */ + GET_METADATA = 0x2, + /** + * \brief Tell storage plugin that process plugin has ended up its work and doesn't require + * any new data. Used to offload the cache when all process plugin return NO_PROCESS. + */ + NO_PROCESS = 0x4, + /** + * \brief Tell storage plugin to flush (immediately export) current flow. + * Behavior when called from post_create, pre_update and post_update: flush current Flow and + * erase FlowRecord. + */ + FLUSH = 0x1, + + /** + * \brief Tell storage plugin to flush (immediately export) current flow. + * Behavior when called from post_create: flush current Flow and erase FlowRecord. + * Behavior when called from pre_update and post_update: flush current Flow, erase + * FlowRecord and call post_create on packet. + */ + FLUSH_WITH_REINSERT = 0x7 + }; + + ProcessPlugin() {} + virtual ~ProcessPlugin() {} + virtual ProcessPlugin* copy() = 0; + + virtual RecordExt *get_ext() const + { + return nullptr; + } - virtual RecordExt *get_ext() const - { - return nullptr; - } + /** + * \brief Called before a new flow record is created. + * \param [in] pkt Parsed packet. + * \return 0 on success or FLOW_FLUSH option. + */ + virtual FlowAction pre_create(Packet& pkt) + { + return FlowAction::GET_ALL_DATA; + } - /** - * \brief Called before a new flow record is created. - * \param [in] pkt Parsed packet. - * \return 0 on success or FLOW_FLUSH option. - */ - virtual int pre_create(Packet &pkt) - { - return 0; - } + /** + * \brief Called after a new flow record is created. 
+ * \param [in,out] rec Reference to flow record. + * \param [in] pkt Parsed packet. + * \return 0 on success or FLOW_FLUSH option. + */ + virtual FlowAction post_create(Flow& rec, const Packet& pkt) + { + return FlowAction::GET_ALL_DATA; + } - /** - * \brief Called after a new flow record is created. - * \param [in,out] rec Reference to flow record. - * \param [in] pkt Parsed packet. - * \return 0 on success or FLOW_FLUSH option. - */ - virtual int post_create(Flow &rec, const Packet &pkt) - { - return 0; - } + /** + * \brief Called before an existing record is update. + * \param [in,out] rec Reference to flow record. + * \param [in,out] pkt Parsed packet. + * \return 0 on success or FLOW_FLUSH option. + */ + virtual FlowAction pre_update(Flow& rec, Packet& pkt) + { + return FlowAction::GET_ALL_DATA; + } - /** - * \brief Called before an existing record is update. - * \param [in,out] rec Reference to flow record. - * \param [in,out] pkt Parsed packet. - * \return 0 on success or FLOW_FLUSH option. - */ - virtual int pre_update(Flow &rec, Packet &pkt) - { - return 0; - } + /** + * \brief Called after an existing record is updated. + * \param [in,out] rec Reference to flow record. + * \param [in,out] pkt Parsed packet. + * \return 0 on success or FLOW_FLUSH option. + */ + virtual FlowAction post_update(Flow& rec, const Packet& pkt) + { + return FlowAction::GET_ALL_DATA; + } - /** - * \brief Called after an existing record is updated. - * \param [in,out] rec Reference to flow record. - * \param [in,out] pkt Parsed packet. - * \return 0 on success or FLOW_FLUSH option. - */ - virtual int post_update(Flow &rec, const Packet &pkt) - { - return 0; - } + /** + * \brief Called before a flow record is exported from the cache. + * \param [in,out] rec Reference to flow record. + */ + virtual void pre_export(Flow& rec) + { - /** - * \brief Called before a flow record is exported from the cache. - * \param [in,out] rec Reference to flow record. 
- */ - virtual void pre_export(Flow &rec) - { - } + } }; } diff --git a/include/ipfixprobe/storage.hpp b/include/ipfixprobe/storage.hpp index f632e1b73..8abcdc275 100644 --- a/include/ipfixprobe/storage.hpp +++ b/include/ipfixprobe/storage.hpp @@ -53,7 +53,8 @@ class StoragePlugin : public Plugin private: ProcessPlugin **m_plugins; /**< Array of plugins. */ uint32_t m_plugin_cnt; - + Flow::PluginsStatus + m_plugins_status; /**< Keeps statuses of the process plugin before flow is created. */ public: StoragePlugin() : m_export_queue(nullptr), m_plugins(nullptr), m_plugin_cnt(0) { @@ -92,10 +93,15 @@ class StoragePlugin : public Plugin virtual void export_expired(time_t ts) { } + virtual void finish() { } +#ifdef WITH_CTT + virtual void set_ctt_config(const std::string& device_name, unsigned comp_index) = 0; +#endif /* WITH_CTT */ + /** * \brief set telemetry directory for the storage */ @@ -109,6 +115,11 @@ class StoragePlugin : public Plugin */ void add_plugin(ProcessPlugin *plugin) { + if (m_plugin_cnt == Flow::MAXIMAL_PROCESS_PLUGIN_COUNT) { + throw PluginError( + std::string("Maximal amount of process plugins is ") + + std::to_string(Flow::MAXIMAL_PROCESS_PLUGIN_COUNT)); + } if (m_plugins == nullptr) { m_plugins = new ProcessPlugin*[8]; } else { @@ -125,77 +136,192 @@ class StoragePlugin : public Plugin m_plugins[m_plugin_cnt++] = plugin; } -protected: - //Every StoragePlugin implementation should call these functions at appropriate places - /** - * \brief Call pre_create function for each added plugin. - * \param [in] pkt Input parsed packet. - * \return Options for flow cache. + * \brief Checks if process plugins require all available data. + * \param [in] flow Stored flow record. + * \return True if all data required, false otherwise. 
*/ - int plugins_pre_create(Packet &pkt) + bool all_data_required(const Flow& flow) const noexcept { - int ret = 0; - for (unsigned int i = 0; i < m_plugin_cnt; i++) { - ret |= m_plugins[i]->pre_create(pkt); - } - return ret; + return flow.plugins_status.get_all_data.any(); } /** - * \brief Call post_create function for each added plugin. - * \param [in,out] rec Stored flow record. - * \param [in] pkt Input parsed packet. - * \return Options for flow cache. + * \brief Checks if process plugins don't require any data. + * \param [in] flow Stored flow record. + * \return True if no data required, false otherwise. */ - int plugins_post_create(Flow &rec, const Packet &pkt) + bool no_data_required(const Flow& flow) const noexcept { - int ret = 0; - for (unsigned int i = 0; i < m_plugin_cnt; i++) { - ret |= m_plugins[i]->post_create(rec, pkt); - } - return ret; + return flow.plugins_status.get_no_data.all(); } /** - * \brief Call pre_update function for each added plugin. - * \param [in,out] rec Stored flow record. - * \param [in] pkt Input parsed packet. - * \return Options for flow cache. + * \brief Checks if process plugins require only flow metadata. + * \param [in] rec Stored flow record. + * \return True if only metadata required, false otherwise. */ - int plugins_pre_update(Flow &rec, Packet &pkt) + bool only_metadata_required(const Flow& flow) const noexcept { - int ret = 0; - for (unsigned int i = 0; i < m_plugin_cnt; i++) { - ret |= m_plugins[i]->pre_update(rec, pkt); - } - return ret; + return !all_data_required(flow); } +protected: + //Every StoragePlugin implementation should call these functions at appropriate places - /** - * \brief Call post_update function for each added plugin. - * \param [in,out] rec Stored flow record. - * \param [in] pkt Input parsed packet. 
- */ - int plugins_post_update(Flow &rec, const Packet &pkt) - { - int ret = 0; - for (unsigned int i = 0; i < m_plugin_cnt; i++) { - ret |= m_plugins[i]->post_update(rec, pkt); - } - return ret; - } + /** + * \brief Call pre_create function for each added plugin. + * \param [in] pkt Input parsed packet. + * \return Options for flow cache. + */ + int plugins_pre_create(Packet& pkt) + { + PluginStatusConverter plugin_status_converter(m_plugins_status); + plugin_status_converter.reset(m_plugin_cnt); + int ret = 0; + for (unsigned int i = 0; i < m_plugin_cnt; i++) { + auto flow_action = m_plugins[i]->pre_create(pkt); + plugin_status_converter.set_flow_status(i, flow_action); + ret |= flow_action; + } + return ret; + } - /** - * \brief Call pre_export function for each added plugin. - * \param [in,out] rec Stored flow record. - */ - void plugins_pre_export(Flow &rec) - { - for (unsigned int i = 0; i < m_plugin_cnt; i++) { - m_plugins[i]->pre_export(rec); - } - } + /** + * \brief Call post_create function for each added plugin. + * \param [in,out] rec Stored flow record. + * \param [in] pkt Input parsed packet. + * \return Options for flow cache. + */ + int plugins_post_create(Flow& rec, const Packet& pkt) + { + PluginStatusConverter plugin_status_converter(m_plugins_status); + int ret = 0; + for (unsigned int i = 0; i < m_plugin_cnt; i++) { + if (plugin_status_converter.plugin_gets_no_data(i)) + continue; + + auto flow_action = m_plugins[i]->post_create(rec, pkt); + plugin_status_converter.set_flow_status(i, flow_action); + ret |= flow_action; + } + + PluginStatusConverter(rec.plugins_status) = plugin_status_converter; + return ret; + } + + /** + * \brief Call pre_update function for each added plugin. + * \param [in,out] rec Stored flow record. + * \param [in] pkt Input parsed packet. + * \return Options for flow cache. 
+ */ + int plugins_pre_update(Flow& rec, Packet& pkt) + { + PluginStatusConverter plugin_status_converter(rec.plugins_status); + int ret = 0; + for (unsigned int i = 0; i < m_plugin_cnt; i++) { + if (plugin_status_converter.plugin_gets_no_data(i)) + continue; + + auto flow_action = m_plugins[i]->pre_update(rec, pkt); + plugin_status_converter.set_flow_status(i, flow_action); + ret |= flow_action; + } + return ret; + } + + /** + * \brief Call post_update function for each added plugin. + * \param [in,out] rec Stored flow record. + * \param [in] pkt Input parsed packet. + */ + int plugins_post_update(Flow& rec, const Packet& pkt) + { + PluginStatusConverter plugin_status_converter(rec.plugins_status); + int ret = 0; + for (unsigned int i = 0; i < m_plugin_cnt; i++) { + if (plugin_status_converter.plugin_gets_no_data(i)) + continue; + + auto flow_action = m_plugins[i]->post_update(rec, pkt); + plugin_status_converter.set_flow_status(i, flow_action); + ret |= flow_action; + } + return ret; + } + + /** + * \brief Call pre_export function for each added plugin. + * \param [in,out] rec Stored flow record. + */ + void plugins_pre_export(Flow& rec) + { + PluginStatusConverter plugin_status_converter(rec.plugins_status); + for (unsigned int i = 0; i < m_plugin_cnt; i++) { + if (plugin_status_converter.plugin_gets_no_data(i)) + continue; + m_plugins[i]->pre_export(rec); + } + } + + /** + * \brief Auxiliary class for manipulations plugins status. + */ + class PluginStatusConverter { + public: + PluginStatusConverter(Flow::PluginsStatus& plugins_status) noexcept + : m_plugins_status(plugins_status) + { + } + + /** + * \brief Resets all kept plugins status to the initial state. + * \param [in] plugin_count Count of process plugins. + */ + void reset(size_t plugin_count) noexcept + { + m_plugins_status.get_all_data.reset(); + m_plugins_status.get_no_data = (uint64_t) -1 << plugin_count; + } + + /** + * \brief Sets process plugin status at the given index. 
+ * \param [in] index Index of the process plugin. + * \param [in] flow_action Given flow action to set. + */ + void set_flow_status(size_t index, ProcessPlugin::FlowAction flow_action) noexcept + { + if (flow_action == ProcessPlugin::FlowAction::NO_PROCESS) { + m_plugins_status.get_all_data[index] = false; + m_plugins_status.get_no_data[index] = true; + } else if (flow_action == ProcessPlugin::FlowAction::GET_METADATA) { + m_plugins_status.get_all_data[index] = false; + } else if (flow_action == ProcessPlugin::FlowAction::GET_ALL_DATA) { + m_plugins_status.get_all_data[index] = true; + } + } + + /** + * \brief Checks if the process plugin at the given index doesn't require any data. + * \param [in] index Index of the process plugin. + * \return True, if the process plugin doesn't require any data. + */ + bool plugin_gets_no_data(size_t index) noexcept + { + return m_plugins_status.get_no_data[index]; + } + + PluginStatusConverter& + operator=(const PluginStatusConverter& plugin_status_converter) noexcept + { + m_plugins_status.get_all_data = plugin_status_converter.m_plugins_status.get_all_data; + m_plugins_status.get_no_data = plugin_status_converter.m_plugins_status.get_no_data; + return *this; + } + + private: + Flow::PluginsStatus& m_plugins_status; + }; }; } diff --git a/input/dpdk.cpp b/input/dpdk.cpp index 5c48153e4..8afb02e25 100644 --- a/input/dpdk.cpp +++ b/input/dpdk.cpp @@ -237,10 +237,13 @@ void DpdkReader::configure_telemetry_dirs( auto port_dir = ports_dir->addDir(std::to_string(portID)); telemetry::FileOps statsOps = {[=]() { return get_port_telemetry(portID); }, nullptr}; register_file(port_dir, "stats", statsOps); + m_portsTelemetry.emplace_back(portID, port_dir); } telemetry::FileOps statsOps = {[=]() { return get_queue_telemetry(); }, nullptr}; register_file(queues_dir, "input-stats", statsOps); + + m_dpdkTelemetry = std::make_unique<DpdkTelemetry>(plugin_dir); } void DpdkReader::init(const char* params) diff --git a/input/dpdk.h b/input/dpdk.h index 
fa8ef501c..c6987cd29 100644 --- a/input/dpdk.h +++ b/input/dpdk.h @@ -31,6 +31,8 @@ #define IPXP_DPDK_READER_H #include "dpdk/dpdkDevice.hpp" +#include "dpdk/dpdkPortTelemetry.hpp" +#include "dpdk/dpdkTelemetry.hpp" #include #include @@ -44,7 +46,7 @@ namespace ipxp { class DpdkOptParser : public OptionsParser { private: static constexpr size_t DEFAULT_MBUF_BURST_SIZE = 64; - static constexpr size_t DEFAULT_MBUF_POOL_SIZE = 16384; + static constexpr size_t DEFAULT_MBUF_POOL_SIZE = 4096; size_t pkt_buffer_size_; size_t pkt_mempool_size_; std::vector port_numbers_; @@ -226,6 +228,9 @@ class DpdkReader : public InputPlugin { telemetry::Content get_queue_telemetry(); telemetry::Content get_port_telemetry(uint16_t portNumber); + std::vector m_portsTelemetry; + std::unique_ptr m_dpdkTelemetry; + struct DpdkRxStats { uint64_t receivedPackets; uint64_t receivedBytes; diff --git a/input/dpdk/dpdkCompat.hpp b/input/dpdk/dpdkCompat.hpp new file mode 100644 index 000000000..e3c7dfe94 --- /dev/null +++ b/input/dpdk/dpdkCompat.hpp @@ -0,0 +1,96 @@ +/** + * \file + * \brief Compatible definitions for DPDK versions. + * \author Pavel Siska + * \date 2024 + */ +/* + * Copyright (C) 2024 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. 
+ */ + +#include +#include + +#if RTE_VERSION < RTE_VERSION_NUM(22, 0, 0, 0) +#define RTE_ETH_MQ_RX_RSS ETH_MQ_RX_RSS +#endif + +#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0) +#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE DEV_TX_OFFLOAD_MBUF_FAST_FREE + +#define RTE_ETH_RX_OFFLOAD_CHECKSUM DEV_RX_OFFLOAD_CHECKSUM + +#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP DEV_RX_OFFLOAD_VLAN_STRIP +#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM DEV_RX_OFFLOAD_IPV4_CKSUM +#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM DEV_RX_OFFLOAD_UDP_CKSUM +#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM DEV_RX_OFFLOAD_TCP_CKSUM +#define RTE_ETH_RX_OFFLOAD_TCP_LRO DEV_RX_OFFLOAD_TCP_LRO +#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP DEV_RX_OFFLOAD_QINQ_STRIP +#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM +#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP DEV_RX_OFFLOAD_MACSEC_STRIP +#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT DEV_RX_OFFLOAD_HEADER_SPLIT +#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER DEV_RX_OFFLOAD_VLAN_FILTER +#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND DEV_RX_OFFLOAD_VLAN_EXTEND +#define RTE_ETH_RX_OFFLOAD_SCATTER DEV_RX_OFFLOAD_SCATTER +#define RTE_ETH_RX_OFFLOAD_TIMESTAMP DEV_RX_OFFLOAD_TIMESTAMP +#define RTE_ETH_RX_OFFLOAD_SECURITY DEV_RX_OFFLOAD_SECURITY +#define RTE_ETH_RX_OFFLOAD_KEEP_CRC DEV_RX_OFFLOAD_KEEP_CRC +#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM DEV_RX_OFFLOAD_SCTP_CKSUM +#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM DEV_RX_OFFLOAD_OUTER_UDP_CKSUM +#define RTE_ETH_RX_OFFLOAD_RSS_HASH DEV_RX_OFFLOAD_RSS_HASH + +#define RTE_ETH_MQ_TX_NONE ETH_MQ_TX_NONE + +#define RTE_ETH_MQ_RX_NONE ETH_MQ_RX_NONE + +#define RTE_ETH_RSS_IP ETH_RSS_IP +#define RTE_ETH_RSS_UDP ETH_RSS_UDP +#define RTE_ETH_RSS_TCP ETH_RSS_TCP +#define RTE_ETH_RSS_SCTP ETH_RSS_SCTP +#define RTE_ETH_RSS_TUNNEL ETH_RSS_TUNNEL + +#define RTE_ETH_RSS_L3_SRC_ONLY ETH_RSS_L3_SRC_ONLY +#define RTE_ETH_RSS_L3_DST_ONLY ETH_RSS_L3_DST_ONLY +#define RTE_ETH_RSS_L4_SRC_ONLY ETH_RSS_L4_SRC_ONLY +#define RTE_ETH_RSS_L4_DST_ONLY ETH_RSS_L4_DST_ONLY + +#define 
RTE_ETH_RSS_IPV4 ETH_RSS_IPV4 +#define RTE_ETH_RSS_FRAG_IPV4 ETH_RSS_FRAG_IPV4 +#define RTE_ETH_RSS_NONFRAG_IPV4_TCP ETH_RSS_NONFRAG_IPV4_TCP +#define RTE_ETH_RSS_NONFRAG_IPV4_UDP ETH_RSS_NONFRAG_IPV4_UDP +#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP ETH_RSS_NONFRAG_IPV4_SCTP +#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER ETH_RSS_NONFRAG_IPV4_OTHER +#define RTE_ETH_RSS_IPV6 ETH_RSS_IPV6 +#define RTE_ETH_RSS_FRAG_IPV6 ETH_RSS_FRAG_IPV6 +#define RTE_ETH_RSS_NONFRAG_IPV6_TCP ETH_RSS_NONFRAG_IPV6_TCP +#define RTE_ETH_RSS_NONFRAG_IPV6_UDP ETH_RSS_NONFRAG_IPV6_UDP +#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP ETH_RSS_NONFRAG_IPV6_SCTP +#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER ETH_RSS_NONFRAG_IPV6_OTHER +#define RTE_ETH_RSS_L2_PAYLOAD ETH_RSS_L2_PAYLOAD +#define RTE_ETH_RSS_IPV6_EX ETH_RSS_IPV6_EX +#define RTE_ETH_RSS_IPV6_TCP_EX ETH_RSS_IPV6_TCP_EX +#define RTE_ETH_RSS_IPV6_UDP_EX ETH_RSS_IPV6_UDP_EX +#define RTE_ETH_RSS_PORT ETH_RSS_PORT +#define RTE_ETH_RSS_VXLAN ETH_RSS_VXLAN +#define RTE_ETH_RSS_NVGRE ETH_RSS_NVGRE +#define RTE_ETH_RSS_GTPU ETH_RSS_GTPU +#define RTE_ETH_RSS_GENEVE ETH_RSS_GENEVE + +#define RTE_BIT64(nr) (UINT64_C(1) << (nr)) + +#endif \ No newline at end of file diff --git a/input/dpdk/dpdkDevice.cpp b/input/dpdk/dpdkDevice.cpp index 8c92cc18c..cf3a6df5b 100644 --- a/input/dpdk/dpdkDevice.cpp +++ b/input/dpdk/dpdkDevice.cpp @@ -54,7 +54,7 @@ DpdkDevice::DpdkDevice( recognizeDriver(); configurePort(); initMemPools(memPoolSize); - setupRxQueues(); + setupRxQueues(memPoolSize); configureRSS(); enablePort(); } @@ -148,11 +148,7 @@ rte_eth_conf DpdkDevice::createPortConfig() #endif if (m_supportedRSS) { -#if RTE_VERSION >= RTE_VERSION_NUM(21, 11, 0, 0) portConfig.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS; -#else - portConfig.rxmode.mq_mode = ETH_MQ_RX_RSS; -#endif } else { portConfig.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE; } @@ -194,13 +190,13 @@ void DpdkDevice::initMemPools(uint16_t memPoolSize) } } -void DpdkDevice::setupRxQueues() +void DpdkDevice::setupRxQueues(uint16_t memPoolSize) 
{ for (uint16_t rxQueueID = 0; rxQueueID < m_rxQueueCount; rxQueueID++) { int ret = rte_eth_rx_queue_setup( m_portID, rxQueueID, - m_mBufsCount, + memPoolSize, rte_eth_dev_socket_id(m_portID), nullptr, m_memPools[rxQueueID]); @@ -219,25 +215,36 @@ void DpdkDevice::configureRSS() return; } - constexpr size_t RSS_KEY_LEN = 40; - // biflow hash key - static uint8_t rssKey[RSS_KEY_LEN] - = {0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, - 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, - 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A}; + rte_eth_dev_info rteDevInfo; + if (rte_eth_dev_info_get(m_portID, &rteDevInfo)) { + throw PluginError("DpdkDevice::configureRSS() has failed. Unable to get rte dev info"); + } + + const uint8_t rssHashKeySize = rteDevInfo.hash_key_size; - struct rte_eth_rss_conf rssConfig - = {.rss_key = rssKey, - .rss_key_len = RSS_KEY_LEN, -#if RTE_VERSION >= RTE_VERSION_NUM(21, 11, 0, 0) - .rss_hf = RTE_ETH_RSS_IP, -#else - .rss_hf = ETH_RSS_IP, -#endif - }; + m_hashKey.resize(rssHashKeySize); + std::generate( + m_hashKey.begin(), + m_hashKey.end(), + [idx = static_cast(0)]() mutable { + static const std::array hashKey = {0x6D, 0x5A}; + return hashKey[idx++ % sizeof(hashKey)]; + }); + + const uint64_t rssOffloads = rteDevInfo.flow_type_rss_offloads & RTE_ETH_RSS_IP; + if (rssOffloads != RTE_ETH_RSS_IP) { + std::cerr << "RTE_ETH_RSS_IP is not supported by the card. Used subset: " << rssOffloads << std::endl; + } + + struct rte_eth_rss_conf rssConfig = {}; + rssConfig.rss_key = m_hashKey.data(); + rssConfig.rss_key_len = rssHashKeySize; + rssConfig.rss_hf = rssOffloads; - if (rte_eth_dev_rss_hash_update(m_portID, &rssConfig)) { - std::cerr << "Setting RSS hash for port " << m_portID << "." 
<< std::endl; + int ret = rte_eth_dev_rss_hash_update(m_portID, &rssConfig); + if (ret < 0) { + std::cerr << "Setting RSS {" << rssOffloads << "} for port " << m_portID << " failed. Errno:" << ret << std::endl; + throw PluginError("DpdkDevice::configureRSS() has failed."); } } diff --git a/input/dpdk/dpdkDevice.hpp b/input/dpdk/dpdkDevice.hpp index 0fe07ef64..ef7d90ff0 100644 --- a/input/dpdk/dpdkDevice.hpp +++ b/input/dpdk/dpdkDevice.hpp @@ -26,6 +26,7 @@ #pragma once #include "dpdkMbuf.hpp" +#include "dpdkCompat.hpp" #include #include @@ -76,7 +77,7 @@ class DpdkDevice { void configurePort(); rte_eth_conf createPortConfig(); void initMemPools(uint16_t memPoolSize); - void setupRxQueues(); + void setupRxQueues(uint16_t memPoolSize); void configureRSS(); void enablePort(); void createRteMempool(uint16_t mempoolSize); @@ -84,7 +85,8 @@ class DpdkDevice { void registerRxTimestamp(); std::vector m_memPools; - uint16_t m_portID; + std::vector m_hashKey; + uint16_t m_portID; uint16_t m_rxQueueCount; uint16_t m_txQueueCount; uint16_t m_mBufsCount; diff --git a/input/dpdk/dpdkPortTelemetry.cpp b/input/dpdk/dpdkPortTelemetry.cpp new file mode 100644 index 000000000..e8f802b9c --- /dev/null +++ b/input/dpdk/dpdkPortTelemetry.cpp @@ -0,0 +1,339 @@ +/** + * \file + * \brief Implementation of the DpdkPortTelemetry class and related helper functions + * \author Pavel Siska + * \date 2024 + */ +/* + * Copyright (C) 2024 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. 
Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. + */ + +#include "dpdkPortTelemetry.hpp" +#include "dpdkCompat.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace ipxp { + +static struct rte_eth_dev_info getDeviceInfo(uint16_t portId) +{ + struct rte_eth_dev_info devInfo; + + const int ret = rte_eth_dev_info_get(portId, &devInfo); + if (ret < 0) { + throw std::runtime_error("getDeviceInfo() has failed"); + } + + return devInfo; +} + +static std::string getDeviceNameByPortId(uint16_t portId) +{ + std::array deviceName; + + const int ret = rte_eth_dev_get_name_by_port(portId, deviceName.data()); + if (ret < 0) { + return ""; + } + + return {deviceName.data()}; +} + +static std::string getRssHashKeyByPortId(uint16_t portId) +{ + uint8_t rssHashKeySize = 0; + try { + rssHashKeySize = getDeviceInfo(portId).hash_key_size; + } catch (const std::exception& ex) { + return ""; + } + + std::vector rssHashKey(rssHashKeySize); + + struct rte_eth_rss_conf rssConf = {}; + rssConf.rss_key = rssHashKey.data(); + rssConf.rss_key_len = rssHashKeySize; + + const int ret = rte_eth_dev_rss_hash_conf_get(portId, &rssConf); + if (ret < 0) { + return ""; + } + + std::ostringstream oss; + for (const auto& byte : rssHashKey) { + oss << std::hex << std::setw(2) << std::setfill('0') << static_cast(byte); + } + return oss.str(); +} + +static std::string getRssHashByPortId(uint16_t portId) +{ + struct rte_eth_rss_conf rssConf = {}; + rssConf.rss_key = nullptr; + rssConf.rss_key_len = 0; + + const int ret = rte_eth_dev_rss_hash_conf_get(portId, &rssConf); + if (ret < 0) { + return ""; + } + + std::vector rssHashes; + + if ((rssConf.rss_hf & RTE_ETH_RSS_IPV4) != 0U) { + rssHashes.emplace_back("IPV4"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_FRAG_IPV4) != 0U) { + 
rssHashes.emplace_back("FRAG_IPV4"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0U) { + rssHashes.emplace_back("NONFRAG_IPV4_TCP"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0U) { + rssHashes.emplace_back("NONFRAG_IPV4_UDP"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) != 0U) { + rssHashes.emplace_back("NONFRAG_IPV4_SCTP"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_OTHER) != 0U) { + rssHashes.emplace_back("NONFRAG_IPV4_OTHER"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_IPV6) != 0U) { + rssHashes.emplace_back("IPV6"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_FRAG_IPV6) != 0U) { + rssHashes.emplace_back("FRAG_IPV6"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0U) { + rssHashes.emplace_back("NONFRAG_IPV6_TCP"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0U) { + rssHashes.emplace_back("NONFRAG_IPV6_UDP"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) != 0U) { + rssHashes.emplace_back("NONFRAG_IPV6_SCTP"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_OTHER) != 0U) { + rssHashes.emplace_back("NONFRAG_IPV6_OTHER"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_L2_PAYLOAD) != 0U) { + rssHashes.emplace_back("L2_PAYLOAD"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_IPV6_EX) != 0U) { + rssHashes.emplace_back("IPV6_EX"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) != 0U) { + rssHashes.emplace_back("IPV6_TCP_EX"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_IPV6_UDP_EX) != 0U) { + rssHashes.emplace_back("IPV6_UDP_EX"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_PORT) != 0U) { + rssHashes.emplace_back("PORT"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_VXLAN) != 0U) { + rssHashes.emplace_back("VXLAN"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_GENEVE) != 0U) { + rssHashes.emplace_back("GENEVE"); + } + if ((rssConf.rss_hf & RTE_ETH_RSS_NVGRE) != 0U) { + rssHashes.emplace_back("NVGRE"); + } +#if RTE_VERSION >= RTE_VERSION_NUM(21, 11, 0, 0) + if ((rssConf.rss_hf & 
RTE_ETH_RSS_MPLS) != 0U) { + rssHashes.emplace_back("MPLS"); + } +#endif + + const std::string concatenatedRssHash = std::accumulate( + rssHashes.begin(), + rssHashes.end(), + std::string {}, + [](const std::string& str1, const std::string& str2) { + return str1.empty() ? str2 : str1 + ", " + str2; + }); + + return concatenatedRssHash; +} + +static telemetry::Dict getDeviceStatsByPortId(uint16_t portId) +{ + struct rte_eth_stats stats; + const int ret = rte_eth_stats_get(portId, &stats); + if (ret < 0) { + return {}; + } + + telemetry::Dict statsDict = { + {"rx-ipackets", stats.ipackets}, + {"rx-ibytes", stats.ibytes}, + {"rx-imissed", stats.imissed}, + {"rx-ierrors", stats.ierrors}, + {"rx-nombuf", stats.rx_nombuf}, + {"tx-opackets", stats.opackets}, + {"tx-obytes", stats.obytes}, + {"tx-oerrors", stats.oerrors}, + }; + + return statsDict; +} + +static telemetry::Dict getDeviceQueueStatsByPortId(uint16_t portId) +{ + struct rte_eth_stats stats; + const int ret = rte_eth_stats_get(portId, &stats); + if (ret < 0) { + return {}; + } + + const rte_eth_dev_info devInfo = getDeviceInfo(portId); + + uint16_t maxQueuesCount; + if (RTE_ETHDEV_QUEUE_STAT_CNTRS > std::numeric_limits::max()) { + maxQueuesCount = std::numeric_limits::max(); + } else { + maxQueuesCount = static_cast(RTE_ETHDEV_QUEUE_STAT_CNTRS); + } + + const uint16_t rxQueuesCount = std::min(maxQueuesCount, devInfo.nb_rx_queues); + const uint16_t txQueuesCount = std::min(maxQueuesCount, devInfo.nb_tx_queues); + + telemetry::Dict dict; + + for (uint16_t queueId = 0; queueId < rxQueuesCount; queueId++) { + const std::string queueIdName = std::to_string(queueId); + dict[queueIdName + "-rx-ipackets"] = stats.q_ipackets[queueId]; + dict[queueIdName + "-rx-ibytes"] = stats.q_ibytes[queueId]; + dict[queueIdName + "-rx-ierrors"] = stats.q_errors[queueId]; + } + + for (uint16_t queueId = 0; queueId < txQueuesCount; queueId++) { + const std::string queueIdName = std::to_string(queueId); + dict[queueIdName + 
"-tx-opackets"] = stats.q_opackets[queueId]; + dict[queueIdName + "-tx-obytes"] = stats.q_obytes[queueId]; + } + + return dict; +} + +static telemetry::Dict getDeviceXStatsByPortId(uint16_t portId) +{ + int ret; + ret = rte_eth_xstats_get_names(portId, nullptr, 0); + if (ret < 0) { + return {}; + } + + const auto count = static_cast<unsigned int>(ret); + + std::vector<rte_eth_xstat_name> xstatsNames(count); + std::vector<rte_eth_xstat> xstats(count); + + ret = rte_eth_xstats_get_names(portId, xstatsNames.data(), count); + if (ret < 0) { + return {}; + } + + ret = rte_eth_xstats_get(portId, xstats.data(), count); + if (ret < 0) { + return {}; + } + + telemetry::Dict dict; + for (unsigned int idx = 0; idx < count; idx++) { + dict[xstatsNames[idx].name] = xstats[idx].value; + } + + return dict; +} + +struct AppFsFile { + std::string name; + telemetry::FileOps ops; +}; + +static std::vector<AppFsFile> getAppFsFiles(uint16_t portId) +{ + std::vector<AppFsFile> files = { + { + .name = "devname", + .ops = { + .read = [portId]() { return getDeviceNameByPortId(portId); }, + }, + }, + { + .name = "rss_hash_key", + .ops = { + .read = [portId]() { return getRssHashKeyByPortId(portId); }, + }, + }, + { + .name = "rss_hash", + .ops = { + .read = [portId]() { return getRssHashByPortId(portId); }, + }, + }, + { + .name = "devstats", + .ops = { + .read = [portId]() { return getDeviceStatsByPortId(portId); }, + }, + }, + { + .name = "devstats_queues", + .ops = { + .read = [portId]() { return getDeviceQueueStatsByPortId(portId); }, + }, + }, + { + .name = "devxstats", + .ops = { + .read = [portId]() { return getDeviceXStatsByPortId(portId); }, + }, + }, + + }; + return files; +} + +DpdkPortTelemetry::DpdkPortTelemetry( + uint16_t portId, + const std::shared_ptr<telemetry::Directory>& dir) + : M_PORT_ID(portId) +{ + for (auto [name, ops] : getAppFsFiles(M_PORT_ID)) { + if (dir->getEntry(name)) { + continue; + } + auto file = dir->addFile(name, ops); + m_holder.add(file); + } +} + +} // namespace ipxp diff --git a/input/dpdk/dpdkPortTelemetry.hpp 
b/input/dpdk/dpdkPortTelemetry.hpp new file mode 100644 index 000000000..ab7823039 --- /dev/null +++ b/input/dpdk/dpdkPortTelemetry.hpp @@ -0,0 +1,58 @@ +/** + * \file + * \brief Class for managing port telemetry. + * \author Pavel Siska + * \date 2024 + */ +/* + * Copyright (C) 2024 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. + */ + +#pragma once + +#include +#include + +#include + +namespace ipxp { + +/** + * @brief Class for managing DPDK port telemetry + * + * This class integrates and monitors telemetry information for a specific DPDK port. + */ +class DpdkPortTelemetry { +public: + /** + * @brief Constructor for DpdkPortTelemetry + * + * Creates an instance of the class for a specific DPDK port and adds telemetry files to the + * directory. 
+ * + * @param portId ID of the DPDK port + * @param dir Directory for telemetry files + */ + DpdkPortTelemetry(uint16_t portId, const std::shared_ptr& dir); + +private: + const uint16_t M_PORT_ID; + telemetry::Holder m_holder; +}; + +} // namespace ipxp \ No newline at end of file diff --git a/input/dpdk/dpdkTelemetry.cpp b/input/dpdk/dpdkTelemetry.cpp new file mode 100644 index 000000000..e50f85a4c --- /dev/null +++ b/input/dpdk/dpdkTelemetry.cpp @@ -0,0 +1,219 @@ +/** + * \file + * \brief Implementation of DpdkTelemetry class and helper functions for rings and mempools + * information retrieval. + * \author Pavel Siska + * \date 2024 + */ +/* + * Copyright (C) 2024 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. 
+ */ + +#include "dpdkTelemetry.hpp" + +#include +#include + +#include +#include +#include +#include + +namespace ipxp { + +static void createRingsInfo(struct rte_ring* ring, void* arg) +{ + std::string& buffer = *reinterpret_cast(arg); + unsigned int count; + unsigned int freeCount; + unsigned int size; + unsigned int capacity; + int isFull; + int isEmpty; + + count = rte_ring_count(ring); + freeCount = rte_ring_free_count(ring); + size = rte_ring_get_size(ring); + capacity = rte_ring_get_capacity(ring); + isFull = rte_ring_full(ring); + isEmpty = rte_ring_empty(ring); + + if (buffer.empty()) { + buffer += "name "; + buffer += "flags "; + buffer += "usedCount "; + buffer += "freeCount "; + buffer += "size "; + buffer += "capacity "; + buffer += "status"; + buffer += "\n"; + } + + buffer += std::string(ring->name) + " "; + buffer += std::to_string(ring->flags) + " "; + buffer += std::to_string(count) + " "; + buffer += std::to_string(freeCount) + " "; + buffer += std::to_string(size) + " "; + buffer += std::to_string(capacity) + " "; + buffer += isFull == 1 ? "full" : isEmpty == 1 ? "empty" : "inUse"; + buffer += "\n"; +} + +static void ringsWalk(void (*fnc)(struct rte_ring*, void* ctx), void* arg) +{ + TAILQ_HEAD(rte_ring_list, rte_tailq_entry); + struct rte_ring_list* rings; + struct rte_tailq_entry* entry; + + rte_mcfg_tailq_read_lock(); + + rings = RTE_TAILQ_LOOKUP(RTE_TAILQ_RING_NAME, rte_ring_list); + if (rings == nullptr) { + rte_mcfg_tailq_read_unlock(); + throw std::runtime_error("RTE_TAILQ_LOOKUP(" RTE_TAILQ_RING_NAME ") failed"); + } + + try { + TAILQ_FOREACH(entry, rings, next) + { + fnc((struct rte_ring*) entry->data, arg); + } + } catch (...) 
{ + rte_mcfg_tailq_read_unlock(); + throw; + } + + rte_mcfg_tailq_read_unlock(); +} + +static void createMempoolsInfo(struct rte_mempool* mempool, std::string& buffer) +{ + const rte_mempool_ops* ops = rte_mempool_get_ops(mempool->ops_index); + const unsigned int avail = rte_mempool_avail_count(mempool); + const unsigned int inUse = rte_mempool_in_use_count(mempool); + const int isFull = rte_mempool_full(mempool); + const int isEmpty = rte_mempool_empty(mempool); + const uint64_t totalSize = static_cast(mempool->populated_size) + * static_cast((mempool->elt_size + mempool->header_size + mempool->trailer_size)); + + if (buffer.empty()) { + buffer += "name "; + buffer += "socketID "; + buffer += "flags "; + buffer += "poolID "; + buffer += "size "; + buffer += "cacheSize "; + buffer += "elementSize "; + buffer += "headerSize "; + buffer += "trailerSize "; + buffer += "totalSize "; + buffer += "availableCount "; + buffer += "usedCount "; + buffer += "status "; + buffer += "Ops"; + buffer += "\n"; + } + + buffer += std::string(mempool->name) + " "; + buffer += std::to_string(mempool->socket_id) + " "; + buffer += std::to_string(mempool->flags) + " "; + buffer += std::to_string(mempool->pool_id) + " "; + buffer += std::to_string(mempool->size) + " "; + buffer += std::to_string(mempool->cache_size) + " "; + buffer += std::to_string(mempool->elt_size) + " "; + buffer += std::to_string(mempool->header_size) + " "; + buffer += std::to_string(mempool->trailer_size) + " "; + buffer += std::to_string(totalSize) + " "; + buffer += std::to_string(avail) + " "; + buffer += std::to_string(inUse) + " "; + buffer += (isFull == 1 ? "full " : isEmpty == 1 ? "empty " : "inUse "); + buffer += (ops != nullptr ? 
std::string(ops->name) : "(none)"); + buffer += "\n"; +} + +static std::string getMempoolsInfo() +{ + struct Walker { + std::string buffer; + std::exception_ptr exc = nullptr; + void operator()(rte_mempool* pool) + { + if (exc != nullptr) { + return; + } + try { + createMempoolsInfo(pool, buffer); + } catch (...) { + exc = std::current_exception(); + } + } + }; + Walker walker; + + rte_mempool_walk( + [](rte_mempool* pool, void* arg) { (*reinterpret_cast<Walker*>(arg))(pool); }, + &walker); + if (walker.exc != nullptr) { + std::rethrow_exception(walker.exc); + } + return walker.buffer; +} + +static std::string getRingsInfo() +{ + std::string buffer; + ringsWalk(&createRingsInfo, &buffer); + return buffer; +} + +struct AppFsFile { + std::string name; + telemetry::FileOps ops; +}; + +static std::vector<AppFsFile> getAppFsFiles() +{ + std::vector<AppFsFile> files = { + { + .name = "mempools", + .ops = { + .read = []() { return getMempoolsInfo(); }, + }, + }, + { + .name = "rings", + .ops = { + .read = []() { return getRingsInfo(); }, + }, + }, + }; + return files; +} + +DpdkTelemetry::DpdkTelemetry(const std::shared_ptr<telemetry::Directory>& dpdkDir) +{ + for (auto [name, ops] : getAppFsFiles()) { + if (dpdkDir->getEntry(name)) { + continue; + } + auto file = dpdkDir->addFile(name, ops); + m_holder.add(file); + } +} + +} // namespace ipxp diff --git a/input/dpdk/dpdkTelemetry.hpp b/input/dpdk/dpdkTelemetry.hpp new file mode 100644 index 000000000..83a6e276c --- /dev/null +++ b/input/dpdk/dpdkTelemetry.hpp @@ -0,0 +1,56 @@ +/** + * \file + * \brief Class for managing DPDK telemetry + * \author Pavel Siska + * \date 2024 + */ +/* + * Copyright (C) 2024 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. + */ + +#pragma once + +#include + +#include + +namespace ipxp { + +/** + * @brief Class for managing DPDK telemetry + * + * This class handles the integration of DPDK telemetry data (rings, mempools) into the telemetry + * directory. + */ +class DpdkTelemetry { +public: + /** + * @brief Constructor for DpdkTelemetry + * + * Initializes the DPDK telemetry manager and adds files representing DPDK rings and mempools to + * the provided telemetry directory. + * + * @param dpdkDir Pointer to the telemetry directory where files will be added. + */ + DpdkTelemetry(const std::shared_ptr<telemetry::Directory>& dpdkDir); + +private: + telemetry::Holder m_holder; +}; + +} // namespace ipxp diff --git a/input/ndp.cpp b/input/ndp.cpp index b1d623900..67796af61 100644 --- a/input/ndp.cpp +++ b/input/ndp.cpp @@ -27,20 +27,42 @@ * */ +#include #include #include #include +#include +#include +#include +#include +#include #include "ndp.hpp" +#include "ipfixprobe/packet.hpp" +#include "ipfixprobe/plugin.hpp" #include "parser.hpp" namespace ipxp { +uint64_t extract(const uint8_t* bitvec, size_t start_bit, size_t bit_length) { + size_t start_byte = start_bit / 8; + size_t end_bit = start_bit + bit_length; + size_t end_byte = (end_bit + 7) / 8; + uint64_t value = 0; + for (size_t i = 0; i < end_byte - start_byte; ++i) { + value |= static_cast<uint64_t>(bitvec[start_byte + i]) << (8 * i); + } + value >>= (start_bit % 8); + uint64_t mask = (bit_length == 64) ? 
~0ULL : ((1ULL << bit_length) - 1); + return value & mask; +} + telemetry::Content NdpPacketReader::get_queue_telemetry() { telemetry::Dict dict; dict["received_packets"] = m_stats.receivedPackets; dict["received_bytes"] = m_stats.receivedBytes; + dict["bad_metadata"] = m_stats.bad_metadata; return dict; } @@ -71,7 +93,11 @@ void NdpPacketReader::init(const char *params) if (parser.m_dev.empty()) { throw PluginError("specify device path"); } + if (parser.m_metadata == "ctt") { + m_ctt_metadata = true; + } init_ifc(parser.m_dev); + m_device = parser.m_dev; } void NdpPacketReader::close() @@ -79,6 +105,21 @@ void NdpPacketReader::close() ndpReader.close(); } +#ifdef WITH_CTT +std::pair NdpPacketReader::get_ctt_config() const +{ + std::string dev = m_device; + int channel_id = 0; + std::size_t delimiter_found = m_device.find_last_of(":"); + if (delimiter_found != std::string::npos) { + std::string channel_str = m_device.substr(delimiter_found + 1); + dev = m_device.substr(0, delimiter_found); + channel_id = std::stoi(channel_str); + } + return std::make_pair(dev, channel_id); +} +#endif /* WITH_CTT */ + void NdpPacketReader::init_ifc(const std::string &dev) { if (ndpReader.init_interface(dev) != 0) { @@ -86,6 +127,38 @@ void NdpPacketReader::init_ifc(const std::string &dev) } } +int NdpPacketReader::parse_ctt_metadata(const ndp_packet *ndp_packet, Metadata_CTT &ctt) +{ + if (ndp_packet->header_length != 32) { + return -1; + } + const uint8_t *metadata = ndp_packet->header; + + ctt.ts.tv_usec = extract(metadata, 0, 32); + ctt.ts.tv_sec = extract(metadata, 32, 32); + ctt.vlan_tci = extract(metadata, 64, 16); + ctt.vlan_vld = extract(metadata, 80, 1); + ctt.vlan_stripped = extract(metadata, 81, 1); + ctt.ip_csum_status = static_cast(extract(metadata, 82, 2)); + ctt.l4_csum_status = static_cast(extract(metadata, 84, 2)); + ctt.parser_status = static_cast(extract(metadata, 86, 2)); + ctt.ifc = extract(metadata, 88, 8); + ctt.filter_bitmap = extract(metadata, 96, 16); + 
ctt.ctt_export_trig = extract(metadata, 112, 1); + ctt.ctt_rec_matched = extract(metadata, 113, 1); + ctt.ctt_rec_created = extract(metadata, 114, 1); + ctt.ctt_rec_deleted = extract(metadata, 115, 1); + ctt.flow_hash = extract(metadata, 128, 64); + ctt.l2_len = extract(metadata, 192, 7); + ctt.l3_len = extract(metadata, 199, 9); + ctt.l4_len = extract(metadata, 208, 8); + ctt.l2_ptype = static_cast(extract(metadata, 216, 4)); + ctt.l3_ptype = static_cast(extract(metadata, 220, 4)); + ctt.l4_ptype = static_cast(extract(metadata, 224, 4)); + + return 0; +} + InputPlugin::Result NdpPacketReader::get(PacketBlock &packets) { parser_opt_t opt = {&packets, false, false, 0}; @@ -107,7 +180,25 @@ InputPlugin::Result NdpPacketReader::get(PacketBlock &packets) throw PluginError(ndpReader.error_msg); } read_pkts++; - parse_packet(&opt, m_parser_stats, timestamp, ndp_packet->data, ndp_packet->data_length, ndp_packet->data_length); +#ifdef WITH_CTT + if (m_ctt_metadata) { + Metadata_CTT ctt; + int ret = parse_ctt_metadata(ndp_packet, ctt); + if (ret == -1) { + m_stats.bad_metadata++; + parse_packet(&opt, m_parser_stats, timestamp, ndp_packet->data, ndp_packet->data_length, ndp_packet->data_length); + } else { + if (parse_packet_ctt_metadata(&opt, m_parser_stats, ctt, ndp_packet->data, ndp_packet->data_length, ndp_packet->data_length) == -1) { + m_stats.bad_metadata++; + parse_packet(&opt, m_parser_stats, timestamp, ndp_packet->data, ndp_packet->data_length, ndp_packet->data_length); + } + } + } else { +#endif /* WITH_CTT */ + parse_packet(&opt, m_parser_stats, timestamp, ndp_packet->data, ndp_packet->data_length, ndp_packet->data_length); +#ifdef WITH_CTT + } +#endif /* WITH_CTT */ } m_seen += read_pkts; diff --git a/input/ndp.hpp b/input/ndp.hpp index 77b81fef6..12a98a4a6 100644 --- a/input/ndp.hpp +++ b/input/ndp.hpp @@ -30,12 +30,14 @@ #ifndef IPXP_INPUT_NDP_HPP #define IPXP_INPUT_NDP_HPP +#include #include #include #include #include #include +#include namespace ipxp { @@ 
-44,13 +46,14 @@ class NdpOptParser : public OptionsParser public: std::string m_dev; uint64_t m_id; + std::string m_metadata; - NdpOptParser() : OptionsParser("ndp", "Input plugin for reading packets from a ndp device"), m_dev(""), m_id(0) + NdpOptParser() : OptionsParser("ndp", "Input plugin for reading packets from a ndp device"), m_dev(""), m_id(0), m_metadata("") { register_option("d", "dev", "PATH", "Path to a device file", [this](const char *arg){m_dev = arg; return true;}, OptionFlags::RequiredArgument); register_option("I", "id", "NUM", "Link identifier number", - [this](const char *arg){try {m_id = str2num(arg);} catch(std::invalid_argument &e) {return false;} return true;}, - OptionFlags::RequiredArgument); + [this](const char *arg){try {m_id = str2num(arg);} catch(std::invalid_argument &e) {return false;} return true;}, OptionFlags::RequiredArgument); + register_option("M", "meta", "Metadata type", "Choose metadata type if any", [this](const char *arg){m_metadata = arg; return true;}, OptionFlags::RequiredArgument); } }; @@ -70,10 +73,15 @@ class NdpPacketReader : public InputPlugin std::shared_ptr plugin_dir, std::shared_ptr queues_dir) override; +#ifdef WITH_CTT + virtual std::pair get_ctt_config() const override; +#endif /* WITH_CTT */ + private: struct RxStats { uint64_t receivedPackets; uint64_t receivedBytes; + uint64_t bad_metadata; }; telemetry::Content get_queue_telemetry(); @@ -81,7 +89,12 @@ class NdpPacketReader : public InputPlugin NdpReader ndpReader; RxStats m_stats = {}; + bool m_ctt_metadata = false; + + std::string m_device; + void init_ifc(const std::string &dev); + int parse_ctt_metadata(const ndp_packet *ndp_packet, Metadata_CTT &ctt); }; } diff --git a/input/parser.cpp b/input/parser.cpp index 61b696cbf..d5fded1c1 100644 --- a/input/parser.cpp +++ b/input/parser.cpp @@ -27,14 +27,15 @@ */ #include +#include #include #include -#include #include #include #include "parser.hpp" #include "headers.hpp" +#include #include namespace ipxp { 
@@ -776,4 +777,121 @@ void parse_packet(parser_opt_t *opt, ParserStats& stats, struct timeval ts, cons opt->pblock->bytes += len; } +#ifdef WITH_CTT +int parse_packet_ctt_metadata(parser_opt_t *opt, ParserStats& stats, const Metadata_CTT& metadata, const uint8_t *data, uint16_t len, uint16_t caplen) +{ + if (opt->pblock->cnt >= opt->pblock->size) { + return 0; + } + Packet *pkt = &opt->pblock->pkts[opt->pblock->cnt]; + + // check metadata validity + if (metadata.parser_status == PA_OK) { + pkt->cttmeta_valid = true; + } else { + pkt->cttmeta_valid = false; + return -1; + } + + pkt->cttmeta = metadata; + + pkt->packet_len_wire = len; + pkt->ts = metadata.ts; + pkt->src_port = 0; + pkt->dst_port = 0; + pkt->ip_proto = 0; + pkt->ip_ttl = 0; + pkt->ip_flags = 0; + pkt->ip_version = 0; + pkt->ip_payload_len = 0; + pkt->tcp_flags = 0; + pkt->tcp_window = 0; + pkt->tcp_options = 0; + pkt->tcp_mss = 0; + pkt->mplsTop = 0; + + stats.seen_packets++; + + uint16_t data_offset; + uint32_t l3_hdr_offset = metadata.l2_len; + uint32_t l4_hdr_offset = metadata.l2_len + metadata.l3_len; + + try { + // L2 + data_offset = parse_eth_hdr(data, caplen, pkt); + if (pkt->ethertype == ETH_P_TRILL) { + data_offset += parse_trill(data + metadata.l2_len, metadata.l2_len, pkt); + stats.trill_packets++; + data_offset += parse_eth_hdr(data + metadata.l2_len, metadata.l2_len, pkt); + } + + // L3 + if (metadata.l2_ptype == L2_ETHER_IP) { + if (metadata.l3_ptype == L3_IPV4 || metadata.l3_ptype == L3_IPV4_EXT) { + data_offset += parse_ipv4_hdr(data + metadata.l2_len, metadata.l3_len, pkt); + stats.ipv4_packets++; + } else if (metadata.l3_ptype == L3_IPV6 || metadata.l3_ptype == L3_IPV4_EXT) { + data_offset += parse_ipv6_hdr(data + metadata.l2_len, metadata.l3_len, pkt); + stats.ipv6_packets++; + } + } else if (metadata.l2_ptype == L2_ETHER_MPLS) { + data_offset += process_mpls(data + data_offset, caplen - data_offset, pkt); + stats.mpls_packets++; + } else if (metadata.l2_ptype == L2_ETHER_PPPOE) { + 
data_offset += process_pppoe(data + data_offset, caplen - data_offset, pkt); + stats.pppoe_packets++; + } else { // if not previous, we try delegate to original parser + parse_packet(opt, stats, metadata.ts, data, len, caplen); + return 0; + } + + // L4 + if (metadata.l4_ptype == L4_TCP) { + data_offset += parse_tcp_hdr(data + l4_hdr_offset, metadata.l4_len, pkt); + stats.tcp_packets++; + } else if (metadata.l4_ptype == L4_UDP) { + data_offset += parse_udp_hdr(data + l4_hdr_offset, metadata.l4_len, pkt); + stats.udp_packets++; + } else { // if not previous, we try delegate to original parser + parse_packet(opt, stats, metadata.ts, data, len, caplen); + return 0; + } + } catch (const char *err) { + DEBUG_MSG("%s\n", err); + return 0; + } + + if (pkt->vlan_id) { + stats.vlan_packets++; + } + + uint16_t pkt_len = caplen; + pkt->packet = data; + pkt->packet_len = caplen; + + if (l4_hdr_offset != l3_hdr_offset) { + if (l4_hdr_offset + pkt->ip_payload_len < 64) { + // Packet contains 0x00 padding bytes, do not include them in payload + pkt_len = l4_hdr_offset + pkt->ip_payload_len; + } + pkt->payload_len_wire = pkt->ip_payload_len - (data_offset - l4_hdr_offset); + } else { + pkt->payload_len_wire = pkt_len - data_offset; + } + + pkt->payload_len = pkt->payload_len_wire; + if (pkt->payload_len + data_offset > pkt_len) { + // Set correct size when payload length is bigger than captured payload length + pkt->payload_len = pkt_len - data_offset; + } + pkt->payload = pkt->packet + data_offset; + + DEBUG_MSG("Payload length:\t%u\n", pkt->payload_len); + DEBUG_MSG("Packet parser exits: packet parsed\n"); + opt->packet_valid = true; + opt->pblock->cnt++; + opt->pblock->bytes += len; + return 0; +} +#endif /* WITH_CTT */ } diff --git a/input/parser.hpp b/input/parser.hpp index 099508505..d9a3b7f44 100644 --- a/input/parser.hpp +++ b/input/parser.hpp @@ -31,6 +31,7 @@ #include #include +#include #ifdef WITH_PCAP #include @@ -85,5 +86,7 @@ typedef struct parser_opt_s { */ void 
parse_packet(parser_opt_t *opt, ParserStats& stats, struct timeval ts, const uint8_t *data, uint16_t len, uint16_t caplen); +int parse_packet_ctt_metadata(parser_opt_t *opt, ParserStats& stats, const Metadata_CTT& metadata, const uint8_t *data, uint16_t len, uint16_t caplen); + } #endif /* IPXP_INPUT_PARSER_HPP */ diff --git a/ipfixprobe.cpp b/ipfixprobe.cpp index 1c4ce6c33..9466a83dc 100644 --- a/ipfixprobe.cpp +++ b/ipfixprobe.cpp @@ -358,6 +358,10 @@ bool process_plugin_args(ipxp_conf_t &conf, IpfixprobeOptParser &parser) if (storage_plugin == nullptr) { throw IPXPError("invalid storage plugin " + storage_name); } +#ifdef WITH_CTT + const auto& [device, comp_idx] = input_plugin->get_ctt_config(); + storage_plugin->set_ctt_config(device, comp_idx); +#endif /* WITH_CTT */ storage_plugin->set_queue(output_queue); storage_plugin->init(storage_params.c_str()); storage_plugin->set_telemetry_dir(pipeline_queue_dir); diff --git a/process/basicplus.cpp b/process/basicplus.cpp index 4957bf4ab..a7823b4af 100644 --- a/process/basicplus.cpp +++ b/process/basicplus.cpp @@ -63,7 +63,7 @@ ProcessPlugin *BASICPLUSPlugin::copy() return new BASICPLUSPlugin(*this); } -int BASICPLUSPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction BASICPLUSPlugin::post_create(Flow &rec, const Packet &pkt) { RecordExtBASICPLUS *p = new RecordExtBASICPLUS(); @@ -78,10 +78,10 @@ int BASICPLUSPlugin::post_create(Flow &rec, const Packet &pkt) p->tcp_syn_size = pkt.ip_len; } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int BASICPLUSPlugin::pre_update(Flow &rec, Packet &pkt) +ProcessPlugin::FlowAction BASICPLUSPlugin::pre_update(Flow &rec, Packet &pkt) { RecordExtBASICPLUS *p = (RecordExtBASICPLUS *) rec.get_extension(RecordExtBASICPLUS::REGISTERED_ID); uint8_t dir = pkt.source_pkt ? 
0 : 1; @@ -98,7 +98,7 @@ int BASICPLUSPlugin::pre_update(Flow &rec, Packet &pkt) } // update tcp options mask across the tcp flow p->tcp_opt[dir] |= pkt.tcp_options; - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/basicplus.hpp b/process/basicplus.hpp index 0955aeb8f..7779b8aa9 100644 --- a/process/basicplus.hpp +++ b/process/basicplus.hpp @@ -173,8 +173,8 @@ class BASICPLUSPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtBASICPLUS(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int pre_update(Flow &rec, Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction pre_update(Flow &rec, Packet &pkt); }; } diff --git a/process/bstats.cpp b/process/bstats.cpp index 8c876611f..f6cd9348e 100644 --- a/process/bstats.cpp +++ b/process/bstats.cpp @@ -67,10 +67,6 @@ ProcessPlugin *BSTATSPlugin::copy() return new BSTATSPlugin(*this); } -int BSTATSPlugin::pre_create(Packet &pkt) -{ - return 0; -} #define BCOUNT burst_count[direction] void BSTATSPlugin::initialize_new_burst(RecordExtBSTATS *bstats_record, uint8_t direction, const Packet &pkt) @@ -133,26 +129,21 @@ void BSTATSPlugin::update_record(RecordExtBSTATS *bstats_record, const Packet &p } } -int BSTATSPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction BSTATSPlugin::post_create(Flow &rec, const Packet &pkt) { RecordExtBSTATS *bstats_record = new RecordExtBSTATS(); rec.add_extension(bstats_record); update_record(bstats_record, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int BSTATSPlugin::pre_update(Flow &rec, Packet &pkt) +ProcessPlugin::FlowAction BSTATSPlugin::pre_update(Flow &rec, Packet &pkt) { RecordExtBSTATS *bstats_record = static_cast(rec.get_extension(RecordExtBSTATS::REGISTERED_ID)); update_record(bstats_record, pkt); - return 0; -} - -int BSTATSPlugin::post_update(Flow &rec, const Packet &pkt) -{ - return 0; + 
return ProcessPlugin::FlowAction::GET_ALL_DATA; } void BSTATSPlugin::pre_export(Flow &rec) diff --git a/process/bstats.hpp b/process/bstats.hpp index 9ca41efed..ec6961433 100644 --- a/process/bstats.hpp +++ b/process/bstats.hpp @@ -242,10 +242,8 @@ class BSTATSPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtBSTATS(); } ProcessPlugin *copy(); - int pre_create(Packet &pkt); - int post_create(Flow &rec, const Packet &pkt); - int pre_update(Flow &rec, Packet &pkt); - int post_update(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction pre_update(Flow &rec, Packet &pkt); void pre_export(Flow &rec); static const struct timeval min_packet_in_burst; diff --git a/process/dns.cpp b/process/dns.cpp index f1a37216a..88cdd96f3 100644 --- a/process/dns.cpp +++ b/process/dns.cpp @@ -100,16 +100,16 @@ ProcessPlugin *DNSPlugin::copy() return new DNSPlugin(*this); } -int DNSPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction DNSPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.dst_port == 53 || pkt.src_port == 53) { return add_ext_dns(reinterpret_cast(pkt.payload), pkt.payload_len, pkt.ip_proto == IPPROTO_TCP, rec); } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } -int DNSPlugin::post_update(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction DNSPlugin::post_update(Flow &rec, const Packet &pkt) { if (pkt.dst_port == 53 || pkt.src_port == 53) { RecordExt *ext = rec.get_extension(RecordExtDNS::REGISTERED_ID); @@ -118,10 +118,10 @@ int DNSPlugin::post_update(Flow &rec, const Packet &pkt) } else { parse_dns(reinterpret_cast(pkt.payload), pkt.payload_len, pkt.ip_proto == IPPROTO_TCP, static_cast(ext)); } - return FLOW_FLUSH; + return ProcessPlugin::FlowAction::FLUSH; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } void DNSPlugin::finish(bool print_stats) @@ -663,16 +663,16 @@ bool DNSPlugin::parse_dns(const char 
*data, unsigned int payload_len, bool tcp, * \param [in] tcp DNS over tcp. * \param [out] rec Destination Flow. */ -int DNSPlugin::add_ext_dns(const char *data, unsigned int payload_len, bool tcp, Flow &rec) +ProcessPlugin::FlowAction DNSPlugin::add_ext_dns(const char *data, unsigned int payload_len, bool tcp, Flow &rec) { RecordExtDNS *ext = new RecordExtDNS(); if (!parse_dns(data, payload_len, tcp, ext)) { delete ext; - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } else { rec.add_extension(ext); } - return FLOW_FLUSH; + return ProcessPlugin::FlowAction::FLUSH; } } diff --git a/process/dns.hpp b/process/dns.hpp index 4786f0c2e..529cb9713 100644 --- a/process/dns.hpp +++ b/process/dns.hpp @@ -188,8 +188,8 @@ class DNSPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtDNS(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int post_update(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_update(Flow &rec, const Packet &pkt); void finish(bool print_stats); private: @@ -201,7 +201,7 @@ class DNSPlugin : public ProcessPlugin uint32_t data_len; /**< Length of packet payload. 
*/ bool parse_dns(const char *data, unsigned int payload_len, bool tcp, RecordExtDNS *rec); - int add_ext_dns(const char *data, unsigned int payload_len, bool tcp, Flow &rec); + ProcessPlugin::FlowAction add_ext_dns(const char *data, unsigned int payload_len, bool tcp, Flow &rec); void process_srv(std::string &str) const; void process_rdata(const char *record_begin, const char *data, std::ostringstream &rdata, uint16_t type, size_t length) const; diff --git a/process/dnssd.cpp b/process/dnssd.cpp index 5e3627cdc..ae1988ee8 100644 --- a/process/dnssd.cpp +++ b/process/dnssd.cpp @@ -113,16 +113,16 @@ ProcessPlugin *DNSSDPlugin::copy() return new DNSSDPlugin(*this); } -int DNSSDPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction DNSSDPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.dst_port == 5353 || pkt.src_port == 5353) { return add_ext_dnssd(reinterpret_cast(pkt.payload), pkt.payload_len, pkt.ip_proto == IPPROTO_TCP, rec); } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } -int DNSSDPlugin::post_update(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction DNSSDPlugin::post_update(Flow &rec, const Packet &pkt) { if (pkt.dst_port == 5353 || pkt.src_port == 5353) { RecordExt *ext = rec.get_extension(RecordExtDNSSD::REGISTERED_ID); @@ -133,10 +133,10 @@ int DNSSDPlugin::post_update(Flow &rec, const Packet &pkt) parse_dns(reinterpret_cast(pkt.payload), pkt.payload_len, pkt.ip_proto == IPPROTO_TCP, static_cast(ext)); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } void DNSSDPlugin::finish(bool print_stats) @@ -677,18 +677,17 @@ void DNSSDPlugin::filtered_append(RecordExtDNSSD *rec, std::string name, uint16_ * \param [in] tcp DNS over tcp. * \param [out] rec Destination Flow. 
*/ -int DNSSDPlugin::add_ext_dnssd(const char *data, unsigned int payload_len, bool tcp, Flow &rec) +ProcessPlugin::FlowAction DNSSDPlugin::add_ext_dnssd(const char *data, unsigned int payload_len, bool tcp, Flow &rec) { RecordExtDNSSD *ext = new RecordExtDNSSD(); if (!parse_dns(data, payload_len, tcp, ext)) { delete ext; - - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } else { rec.add_extension(ext); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/dnssd.hpp b/process/dnssd.hpp index 5d2cefc4b..ca57ca5cd 100644 --- a/process/dnssd.hpp +++ b/process/dnssd.hpp @@ -259,8 +259,8 @@ class DNSSDPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtDNSSD(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int post_update(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_update(Flow &rec, const Packet &pkt); void finish(bool print_stats); private: @@ -273,7 +273,7 @@ class DNSSDPlugin : public ProcessPlugin uint32_t data_len; /**< Length of packet payload. 
*/ bool parse_dns(const char *data, unsigned int payload_len, bool tcp, RecordExtDNSSD *rec); - int add_ext_dnssd(const char *data, unsigned int payload_len, bool tcp, Flow &rec); + ProcessPlugin::FlowAction add_ext_dnssd(const char *data, unsigned int payload_len, bool tcp, Flow &rec); void process_rdata(const char *record_begin, const char *data, DnsSdRr &rdata, uint16_t type, size_t length) const; void filtered_append(RecordExtDNSSD *rec, std::string name); void filtered_append(RecordExtDNSSD *rec, std::string name, uint16_t type, DnsSdRr &rdata); diff --git a/process/flexprobe-data-processing.h b/process/flexprobe-data-processing.h index fe022bdf2..ced609f16 100644 --- a/process/flexprobe-data-processing.h +++ b/process/flexprobe-data-processing.h @@ -84,10 +84,10 @@ class FlexprobeDataProcessing : public ProcessPlugin return new FlexprobeDataProcessing(*this); } - int post_create(Flow &rec, const Packet &pkt) override + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt) override { if (!pkt.custom) { - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } if (!rec.get_extension(FlexprobeData::REGISTERED_ID)) { @@ -97,7 +97,7 @@ class FlexprobeDataProcessing : public ProcessPlugin fd->interface_in = data_view->interface_in; rec.add_extension(fd); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } }; diff --git a/process/flexprobe-encryption-processing.cpp b/process/flexprobe-encryption-processing.cpp index 78410f3c1..563d5eb22 100644 --- a/process/flexprobe-encryption-processing.cpp +++ b/process/flexprobe-encryption-processing.cpp @@ -40,20 +40,20 @@ __attribute__((constructor)) static void register_this_plugin() FlexprobeEncryptionData::REGISTERED_ID = register_extension(); } -int FlexprobeEncryptionProcessing::post_create(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction FlexprobeEncryptionProcessing::post_create(Flow& rec, const Packet& pkt) { if (!rec.get_extension(FlexprobeEncryptionData::REGISTERED_ID)) { auto 
ext = new FlexprobeEncryptionData(); rec.add_extension(ext); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } int FlexprobeEncryptionProcessing::post_update(Flow& rec, const Packet& pkt) { if (!pkt.custom) { - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } // convert timestamp to decimal @@ -79,7 +79,7 @@ int FlexprobeEncryptionProcessing::post_update(Flow& rec, const Packet& pkt) encr_data->mpe_4bit.update(1, encr_data->mpe4_valid_count); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/flexprobe-encryption-processing.h b/process/flexprobe-encryption-processing.h index 056e932de..e18198a05 100644 --- a/process/flexprobe-encryption-processing.h +++ b/process/flexprobe-encryption-processing.h @@ -171,9 +171,9 @@ class FlexprobeEncryptionProcessing : public ProcessPlugin return new FlexprobeEncryptionProcessing(*this); } - int post_create(Flow &rec, const Packet &pkt) override; + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt) override; - int post_update(Flow& rec, const Packet& pkt) override; + ProcessPlugin::FlowAction post_update(Flow& rec, const Packet& pkt) override; }; } diff --git a/process/flexprobe-tcp-tracking.cpp b/process/flexprobe-tcp-tracking.cpp index 1c4596339..f5f740c9f 100644 --- a/process/flexprobe-tcp-tracking.cpp +++ b/process/flexprobe-tcp-tracking.cpp @@ -73,14 +73,14 @@ namespace ipxp return direction == 0 ? 
fs : FlowState::OK; } - int FlexprobeTcpTracking::post_create(Flow& rec, const Packet& pkt) + ProcessPlugin::FlowAction FlexprobeTcpTracking::post_create(Flow& rec, const Packet& pkt) { if (!pkt.custom) { - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } if (pkt.ip_proto != 0x6) { // track only TCP - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } auto data_view = reinterpret_cast(pkt.custom); @@ -99,17 +99,17 @@ namespace ipxp rec.add_extension(td); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - int FlexprobeTcpTracking::post_update(Flow& rec, const Packet& pkt) + ProcessPlugin::FlowAction FlexprobeTcpTracking::post_update(Flow& rec, const Packet& pkt) { if (!pkt.custom) { - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } if (pkt.ip_proto != 0x6) { // track only TCP - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } auto data_view = reinterpret_cast(pkt.custom); @@ -124,7 +124,7 @@ namespace ipxp data_view->payload_size, pkt.tcp_flags & 0x2, pkt.tcp_flags & 0x1); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } auto check_result = check_(*tcp_data, next_tcp, direction); if (check_result == FlowState::PACKET_LOSS) { @@ -146,7 +146,7 @@ namespace ipxp break; } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/flexprobe-tcp-tracking.h b/process/flexprobe-tcp-tracking.h index 2bac1e1c6..c147e42b3 100644 --- a/process/flexprobe-tcp-tracking.h +++ b/process/flexprobe-tcp-tracking.h @@ -103,9 +103,9 @@ class FlexprobeTcpTracking : public ProcessPlugin return new FlexprobeTcpTracking(*this); } - int post_create(Flow &rec, const Packet &pkt) override; + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt) override; - int post_update(Flow &rec, const Packet &pkt) override; + ProcessPlugin::FlowAction post_update(Flow &rec, const Packet &pkt) override; }; } diff --git a/process/flow_hash.cpp b/process/flow_hash.cpp index 75098163f..59f46d2d9 
100644 --- a/process/flow_hash.cpp +++ b/process/flow_hash.cpp @@ -62,7 +62,7 @@ ProcessPlugin *FLOW_HASHPlugin::copy() return new FLOW_HASHPlugin(*this); } -int FLOW_HASHPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction FLOW_HASHPlugin::post_create(Flow &rec, const Packet &pkt) { auto ext = new RecordExtFLOW_HASH(); @@ -70,7 +70,7 @@ int FLOW_HASHPlugin::post_create(Flow &rec, const Packet &pkt) rec.add_extension(ext); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/flow_hash.hpp b/process/flow_hash.hpp index 95d5a0c8f..36f64afca 100644 --- a/process/flow_hash.hpp +++ b/process/flow_hash.hpp @@ -125,7 +125,7 @@ class FLOW_HASHPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtFLOW_HASH(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); }; } diff --git a/process/http.cpp b/process/http.cpp index 9ed2a8861..226bd737c 100644 --- a/process/http.cpp +++ b/process/http.cpp @@ -99,19 +99,21 @@ ProcessPlugin* HTTPPlugin::copy() return new HTTPPlugin(*this); } -int HTTPPlugin::post_create(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction HTTPPlugin::post_create(Flow& rec, const Packet& pkt) { const char* payload = reinterpret_cast(pkt.payload); if (is_request(payload, pkt.payload_len)) { add_ext_http_request(payload, pkt.payload_len, rec); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } else if (is_response(payload, pkt.payload_len)) { add_ext_http_response(payload, pkt.payload_len, rec); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } -int HTTPPlugin::pre_update(Flow& rec, Packet& pkt) +ProcessPlugin::FlowAction HTTPPlugin::pre_update(Flow& rec, Packet& pkt) { RecordExt* ext = nullptr; const char* payload = reinterpret_cast(pkt.payload); @@ -119,29 +121,29 @@ int HTTPPlugin::pre_update(Flow& rec, Packet& pkt) 
ext = rec.get_extension(RecordExtHTTP::REGISTERED_ID); if (ext == nullptr) { /* Check if header is present in flow. */ add_ext_http_request(payload, pkt.payload_len, rec); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } parse_http_request(payload, pkt.payload_len, static_cast(ext)); if (flow_flush) { flow_flush = false; - return FLOW_FLUSH_WITH_REINSERT; + return ProcessPlugin::FlowAction::FLUSH_WITH_REINSERT; } } else if (is_response(payload, pkt.payload_len)) { ext = rec.get_extension(RecordExtHTTP::REGISTERED_ID); if (ext == nullptr) { /* Check if header is present in flow. */ add_ext_http_response(payload, pkt.payload_len, rec); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } parse_http_response(payload, pkt.payload_len, static_cast(ext)); if (flow_flush) { flow_flush = false; - return FLOW_FLUSH_WITH_REINSERT; + return ProcessPlugin::FlowAction::FLUSH_WITH_REINSERT; } } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } void HTTPPlugin::finish(bool print_stats) diff --git a/process/http.hpp b/process/http.hpp index afd570d93..21341dac5 100644 --- a/process/http.hpp +++ b/process/http.hpp @@ -224,8 +224,8 @@ class HTTPPlugin : public ProcessPlugin std::string get_name() const { return "http"; } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int pre_update(Flow &rec, Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction pre_update(Flow &rec, Packet &pkt); void finish(bool print_stats); private: diff --git a/process/icmp.cpp b/process/icmp.cpp index a5c8858bb..e209586a5 100644 --- a/process/icmp.cpp +++ b/process/icmp.cpp @@ -63,12 +63,12 @@ ProcessPlugin *ICMPPlugin::copy() return new ICMPPlugin(*this); } -int ICMPPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction ICMPPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.ip_proto == IPPROTO_ICMP || pkt.ip_proto == IPPROTO_ICMPV6) { if (pkt.payload_len 
< sizeof(RecordExtICMP::type_code)) - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; auto ext = new RecordExtICMP(); @@ -77,8 +77,9 @@ int ICMPPlugin::post_create(Flow &rec, const Packet &pkt) ext->type_code = *reinterpret_cast(pkt.payload); rec.add_extension(ext); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } } diff --git a/process/icmp.hpp b/process/icmp.hpp index d0553ae56..0032db889 100644 --- a/process/icmp.hpp +++ b/process/icmp.hpp @@ -138,7 +138,7 @@ class ICMPPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtICMP(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); }; } diff --git a/process/idpcontent.cpp b/process/idpcontent.cpp index 7943fdaf8..22cca5931 100644 --- a/process/idpcontent.cpp +++ b/process/idpcontent.cpp @@ -79,21 +79,21 @@ void IDPCONTENTPlugin::update_record(RecordExtIDPCONTENT *idpcontent_data, const } } -int IDPCONTENTPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction IDPCONTENTPlugin::post_create(Flow &rec, const Packet &pkt) { RecordExtIDPCONTENT *idpcontent_data = new RecordExtIDPCONTENT(); memset(idpcontent_data->pkt_export_flg, 0, 2 * sizeof(uint8_t)); rec.add_extension(idpcontent_data); update_record(idpcontent_data, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int IDPCONTENTPlugin::post_update(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction IDPCONTENTPlugin::post_update(Flow &rec, const Packet &pkt) { RecordExtIDPCONTENT *idpcontent_data = static_cast(rec.get_extension(RecordExtIDPCONTENT::REGISTERED_ID)); update_record(idpcontent_data, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/idpcontent.hpp b/process/idpcontent.hpp index 8cbd62452..2705a71b6 100644 --- a/process/idpcontent.hpp +++ b/process/idpcontent.hpp @@ -147,8 
+147,8 @@ class IDPCONTENTPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtIDPCONTENT(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int post_update(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_update(Flow &rec, const Packet &pkt); void update_record(RecordExtIDPCONTENT *pstats_data, const Packet &pkt); }; diff --git a/process/mpls.cpp b/process/mpls.cpp index aa8116163..ffc429b73 100644 --- a/process/mpls.cpp +++ b/process/mpls.cpp @@ -46,17 +46,17 @@ ProcessPlugin *MPLSPlugin::copy() return new MPLSPlugin(*this); } -int MPLSPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction MPLSPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.mplsTop == 0) { - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } auto ext = new RecordExtMPLS(); ext->mpls = pkt.mplsTop; rec.add_extension(ext); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/mpls.hpp b/process/mpls.hpp index 679a1bd75..d8367078d 100644 --- a/process/mpls.hpp +++ b/process/mpls.hpp @@ -123,7 +123,7 @@ class MPLSPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtMPLS(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); }; } diff --git a/process/mqtt.cpp b/process/mqtt.cpp index 9eadda2ce..2423069b1 100644 --- a/process/mqtt.cpp +++ b/process/mqtt.cpp @@ -43,23 +43,25 @@ __attribute__((constructor)) static void register_this_plugin() RecordExtMQTT::REGISTERED_ID = register_extension(); } -int MQTTPlugin::post_create(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction MQTTPlugin::post_create(Flow& rec, const Packet& pkt) { - if (has_mqtt_protocol_name(reinterpret_cast(pkt.payload), pkt.payload_len)) + if (has_mqtt_protocol_name(reinterpret_cast(pkt.payload), 
pkt.payload_len)) { add_ext_mqtt(reinterpret_cast(pkt.payload), pkt.payload_len, rec); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; + } + return ProcessPlugin::FlowAction::NO_PROCESS; } -int MQTTPlugin::pre_update(Flow& rec, Packet& pkt) +ProcessPlugin::FlowAction MQTTPlugin::pre_update(Flow& rec, Packet& pkt) { const char* payload = reinterpret_cast(pkt.payload); RecordExt* ext = rec.get_extension(RecordExtMQTT::REGISTERED_ID); if (ext == nullptr) { - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } else { parse_mqtt(payload, pkt.payload_len, static_cast(ext)); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } /** @@ -180,13 +182,13 @@ bool MQTTPlugin::parse_mqtt(const char* data, int payload_len, RecordExtMQTT* re return true; } -int MQTTPlugin::post_update(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction MQTTPlugin::post_update(Flow& rec, const Packet& pkt) { if (flow_flush) { flow_flush = false; - return FLOW_FLUSH; + return ProcessPlugin::FlowAction::FLUSH; } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } /** diff --git a/process/mqtt.hpp b/process/mqtt.hpp index ae1c676a1..f47344ad9 100644 --- a/process/mqtt.hpp +++ b/process/mqtt.hpp @@ -169,9 +169,10 @@ struct RecordExtMQTT : public RecordExt { class MQTTPlugin : public ProcessPlugin { public: - int post_create(Flow& rec, const Packet& pkt) override; - int pre_update(Flow& rec, Packet& pkt) override; - int post_update(Flow& rec, const Packet& pkt) override; + ProcessPlugin::FlowAction post_create(Flow& rec, const Packet& pkt) override; + ProcessPlugin::FlowAction pre_update(Flow& rec, Packet& pkt) override; + ProcessPlugin::FlowAction post_update(Flow& rec, const Packet& pkt) override; + RecordExt* get_ext() const { return new RecordExtMQTT(); } OptionsParser* get_parser() const { return new MQTTOptionsParser(); } std::string get_name() const { return "mqtt"; } diff --git a/process/netbios.cpp b/process/netbios.cpp index 
2d69880fb..0c9461f99 100644 --- a/process/netbios.cpp +++ b/process/netbios.cpp @@ -67,32 +67,31 @@ ProcessPlugin *NETBIOSPlugin::copy() return new NETBIOSPlugin(*this); } -int NETBIOSPlugin::post_create(Flow &rec, const Packet &pkt) { +ProcessPlugin::FlowAction NETBIOSPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.dst_port == 137 || pkt.src_port == 137) { return add_netbios_ext(rec, pkt); } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } -int NETBIOSPlugin::post_update(Flow &rec, const Packet &pkt) { +ProcessPlugin::FlowAction NETBIOSPlugin::post_update(Flow &rec, const Packet &pkt) { if (pkt.dst_port == 137 || pkt.src_port == 137) { return add_netbios_ext(rec, pkt); } - - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } -int NETBIOSPlugin::add_netbios_ext(Flow &rec, const Packet &pkt) { +ProcessPlugin::FlowAction NETBIOSPlugin::add_netbios_ext(Flow &rec, const Packet &pkt) { RecordExtNETBIOS *ext = new RecordExtNETBIOS(); if (parse_nbns(ext, pkt)) { total_netbios_packets++; rec.add_extension(ext); } else { delete ext; + return ProcessPlugin::FlowAction::NO_PROCESS; } - - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } bool NETBIOSPlugin::parse_nbns(RecordExtNETBIOS *rec, const Packet &pkt) { diff --git a/process/netbios.hpp b/process/netbios.hpp index f05279920..d8e2d8829 100644 --- a/process/netbios.hpp +++ b/process/netbios.hpp @@ -125,14 +125,14 @@ class NETBIOSPlugin : public ProcessPlugin { RecordExt *get_ext() const { return new RecordExtNETBIOS(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int post_update(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_update(Flow &rec, const Packet &pkt); void finish(bool print_stats); private: int total_netbios_packets; - int add_netbios_ext(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction add_netbios_ext(Flow &rec, const Packet &pkt); bool 
parse_nbns(RecordExtNETBIOS *rec, const Packet &pkt); int get_query_count(const char *payload, uint16_t payload_length); bool store_first_query(const char *payload, RecordExtNETBIOS *rec); diff --git a/process/nettisa.cpp b/process/nettisa.cpp index 01830c599..5f9485a0a 100644 --- a/process/nettisa.cpp +++ b/process/nettisa.cpp @@ -69,7 +69,7 @@ void NETTISAPlugin::update_record( } } -int NETTISAPlugin::post_create(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction NETTISAPlugin::post_create(Flow& rec, const Packet& pkt) { RecordExtNETTISA* nettisa_data = new RecordExtNETTISA(); rec.add_extension(nettisa_data); @@ -77,16 +77,16 @@ int NETTISAPlugin::post_create(Flow& rec, const Packet& pkt) nettisa_data->prev_time = timeval2usec(pkt.ts); update_record(nettisa_data, pkt, rec); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int NETTISAPlugin::post_update(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction NETTISAPlugin::post_update(Flow& rec, const Packet& pkt) { RecordExtNETTISA* nettisa_data = (RecordExtNETTISA*) rec.get_extension(RecordExtNETTISA::REGISTERED_ID); update_record(nettisa_data, pkt, rec); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } void NETTISAPlugin::pre_export(Flow& rec) diff --git a/process/nettisa.hpp b/process/nettisa.hpp index 6f2526bfc..84b2be73b 100644 --- a/process/nettisa.hpp +++ b/process/nettisa.hpp @@ -178,8 +178,8 @@ class NETTISAPlugin : public ProcessPlugin { RecordExt* get_ext() const { return new RecordExtNETTISA(); } ProcessPlugin* copy(); - int post_create(Flow& rec, const Packet& pkt); - int post_update(Flow& rec, const Packet& pkt); + ProcessPlugin::FlowAction post_create(Flow& rec, const Packet& pkt); + ProcessPlugin::FlowAction post_update(Flow& rec, const Packet& pkt); void update_record(RecordExtNETTISA* nettisa_data, const Packet& pkt, const Flow& rec); void pre_export(Flow& rec); }; diff --git a/process/ntp.cpp b/process/ntp.cpp index d339c7c20..30fbcc90c 100644 --- 
a/process/ntp.cpp +++ b/process/ntp.cpp @@ -85,14 +85,13 @@ ProcessPlugin *NTPPlugin::copy() *\param [in] pkt Parsed packet. *\return 0 on success or FLOW_FLUSH option. */ -int NTPPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction NTPPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.dst_port == 123 || pkt.src_port == 123) { add_ext_ntp(rec, pkt); - return FLOW_FLUSH; + return ProcessPlugin::FlowAction::FLUSH; } - - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } /** diff --git a/process/ntp.hpp b/process/ntp.hpp index af3e9a359..b71a015b3 100644 --- a/process/ntp.hpp +++ b/process/ntp.hpp @@ -245,7 +245,7 @@ class NTPPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtNTP(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); void finish(bool print_stats); private: diff --git a/process/osquery.cpp b/process/osquery.cpp index bd9405c67..8e4881a89 100644 --- a/process/osquery.cpp +++ b/process/osquery.cpp @@ -81,7 +81,7 @@ ProcessPlugin *OSQUERYPlugin::copy() return new OSQUERYPlugin(*this); } -int OSQUERYPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction OSQUERYPlugin::post_create(Flow &rec, const Packet &pkt) { ConvertedFlowData flowDataIPv4(rec.src_ip.v4, rec.dst_ip.v4, rec.src_port, rec.dst_port); @@ -90,9 +90,10 @@ int OSQUERYPlugin::post_create(Flow &rec, const Packet &pkt) rec.add_extension(record); numberOfSuccessfullyRequests++; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } void OSQUERYPlugin::finish(bool print_stats) diff --git a/process/osquery.hpp b/process/osquery.hpp index 986590bb2..77545c94d 100644 --- a/process/osquery.hpp +++ b/process/osquery.hpp @@ -532,7 +532,7 @@ class OSQUERYPlugin : public ProcessPlugin std::string get_name() const { return "osquery"; } ProcessPlugin *copy(); - int
post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); void finish(bool print_stats); private: diff --git a/process/ovpn.cpp b/process/ovpn.cpp index 7b2a1fcfe..c73cfe11e 100644 --- a/process/ovpn.cpp +++ b/process/ovpn.cpp @@ -179,20 +179,20 @@ void OVPNPlugin::update_record(RecordExtOVPN* vpn_data, const Packet& pkt) return; } -int OVPNPlugin::post_create(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction OVPNPlugin::post_create(Flow& rec, const Packet& pkt) { RecordExtOVPN* vpn_data = new RecordExtOVPN(); rec.add_extension(vpn_data); update_record(vpn_data, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int OVPNPlugin::pre_update(Flow& rec, Packet& pkt) +ProcessPlugin::FlowAction OVPNPlugin::pre_update(Flow& rec, Packet& pkt) { RecordExtOVPN* vpn_data = (RecordExtOVPN*) rec.get_extension(RecordExtOVPN::REGISTERED_ID); update_record(vpn_data, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } void OVPNPlugin::pre_export(Flow& rec) diff --git a/process/ovpn.hpp b/process/ovpn.hpp index f875eea98..2433058da 100644 --- a/process/ovpn.hpp +++ b/process/ovpn.hpp @@ -121,8 +121,8 @@ class OVPNPlugin : public ProcessPlugin { RecordExt* get_ext() const { return new RecordExtOVPN(); } ProcessPlugin* copy(); - int post_create(Flow& rec, const Packet& pkt); - int pre_update(Flow& rec, Packet& pkt); + ProcessPlugin::FlowAction post_create(Flow& rec, const Packet& pkt); + ProcessPlugin::FlowAction pre_update(Flow& rec, Packet& pkt); void update_record(RecordExtOVPN* vpn_data, const Packet& pkt); void pre_export(Flow& rec); diff --git a/process/passivedns.cpp b/process/passivedns.cpp index 2244cc4e2..99014b6c2 100644 --- a/process/passivedns.cpp +++ b/process/passivedns.cpp @@ -107,22 +107,22 @@ ProcessPlugin *PassiveDNSPlugin::copy() return new PassiveDNSPlugin(*this); } -int PassiveDNSPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction 
PassiveDNSPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.src_port == 53) { return add_ext_dns(reinterpret_cast(pkt.payload), pkt.payload_len, pkt.ip_proto == IPPROTO_TCP, rec); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int PassiveDNSPlugin::post_update(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction PassiveDNSPlugin::post_update(Flow &rec, const Packet &pkt) { if (pkt.src_port == 53) { return add_ext_dns(reinterpret_cast(pkt.payload), pkt.payload_len, pkt.ip_proto == IPPROTO_TCP, rec); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } void PassiveDNSPlugin::finish(bool print_stats) @@ -494,14 +494,14 @@ bool PassiveDNSPlugin::process_ptr_record(std::string name, RecordExtPassiveDNS * \param [in] tcp DNS over tcp. * \param [out] rec Destination Flow. */ -int PassiveDNSPlugin::add_ext_dns(const char *data, unsigned int payload_len, bool tcp, Flow &rec) +ProcessPlugin::FlowAction PassiveDNSPlugin::add_ext_dns(const char *data, unsigned int payload_len, bool tcp, Flow &rec) { RecordExt *tmp = parse_dns(data, payload_len, tcp); if (tmp != nullptr) { rec.add_extension(tmp); } - return FLOW_FLUSH; + return ProcessPlugin::FlowAction::FLUSH; } } diff --git a/process/passivedns.hpp b/process/passivedns.hpp index f9cdd5e2f..1aba22761 100644 --- a/process/passivedns.hpp +++ b/process/passivedns.hpp @@ -168,8 +168,8 @@ class PassiveDNSPlugin : public ProcessPlugin std::string get_name() const { return "passivedns"; } RecordExt *get_ext() const { return new RecordExtPassiveDNS(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int post_update(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_update(Flow &rec, const Packet &pkt); void finish(bool print_stats); private: @@ -182,7 +182,7 @@ class PassiveDNSPlugin : public ProcessPlugin uint32_t data_len; /**< Length of packet payload. 
*/ RecordExtPassiveDNS *parse_dns(const char *data, unsigned int payload_len, bool tcp); - int add_ext_dns(const char *data, unsigned int payload_len, bool tcp, Flow &rec); + ProcessPlugin::FlowAction add_ext_dns(const char *data, unsigned int payload_len, bool tcp, Flow &rec); std::string get_name(const char *data) const; size_t get_name_length(const char *data) const; diff --git a/process/phists.cpp b/process/phists.cpp index 7d52855a8..d9f379833 100644 --- a/process/phists.cpp +++ b/process/phists.cpp @@ -154,22 +154,22 @@ void PHISTSPlugin::pre_export(Flow &rec) } } -int PHISTSPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction PHISTSPlugin::post_create(Flow &rec, const Packet &pkt) { RecordExtPHISTS *phists_data = new RecordExtPHISTS(); rec.add_extension(phists_data); update_record(phists_data, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int PHISTSPlugin::post_update(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction PHISTSPlugin::post_update(Flow &rec, const Packet &pkt) { RecordExtPHISTS *phists_data = (RecordExtPHISTS *) rec.get_extension(RecordExtPHISTS::REGISTERED_ID); update_record(phists_data, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/phists.hpp b/process/phists.hpp index dd351623f..4d8ea17d2 100644 --- a/process/phists.hpp +++ b/process/phists.hpp @@ -193,8 +193,8 @@ class PHISTSPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtPHISTS(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int post_update(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_update(Flow &rec, const Packet &pkt); private: bool use_zeros; diff --git a/process/pstats.cpp b/process/pstats.cpp index 1dac1ee67..da9ed1375 100644 --- a/process/pstats.cpp +++ b/process/pstats.cpp @@ -146,13 +146,13 @@ void 
PSTATSPlugin::update_record(RecordExtPSTATS *pstats_data, const Packet &pkt } } -int PSTATSPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction PSTATSPlugin::post_create(Flow &rec, const Packet &pkt) { RecordExtPSTATS *pstats_data = new RecordExtPSTATS(); rec.add_extension(pstats_data); update_record(pstats_data, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } void PSTATSPlugin::pre_export(Flow &rec) @@ -163,14 +163,13 @@ void PSTATSPlugin::pre_export(Flow &rec) if (packets <= PSTATS_MINLEN && (flags & 0x02)) { //tcp SYN set rec.remove_extension(RecordExtPSTATS::REGISTERED_ID); } - } -int PSTATSPlugin::post_update(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction PSTATSPlugin::post_update(Flow &rec, const Packet &pkt) { RecordExtPSTATS *pstats_data = (RecordExtPSTATS *) rec.get_extension(RecordExtPSTATS::REGISTERED_ID); update_record(pstats_data, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/pstats.hpp b/process/pstats.hpp index 68eceb78b..e4ed64ed2 100644 --- a/process/pstats.hpp +++ b/process/pstats.hpp @@ -218,8 +218,8 @@ class PSTATSPlugin : public ProcessPlugin std::string get_name() const { return "pstats"; } RecordExt *get_ext() const { return new RecordExtPSTATS(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int post_update(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_update(Flow &rec, const Packet &pkt); void update_record(RecordExtPSTATS *pstats_data, const Packet &pkt); void pre_export(Flow &rec); diff --git a/process/quic.cpp b/process/quic.cpp index 8f8e9da15..6f2efe2e2 100644 --- a/process/quic.cpp +++ b/process/quic.cpp @@ -383,7 +383,7 @@ int QUICPlugin::process_quic( if (version == QUICParser::QUIC_VERSION::version_negotiation) { set_cid_fields(quic_data, rec, &process_quic, toServer, new_quic_flow, pkt); - return FLOW_FLUSH; + 
return ProcessPlugin::FlowAction::FLUSH; } // export if parsed CH @@ -483,27 +483,18 @@ int QUICPlugin::process_quic( return QUIC_NOT_DETECTED; } // QUICPlugin::process_quic -int QUICPlugin::pre_create(Packet& pkt) -{ - return 0; -} -int QUICPlugin::post_create(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction QUICPlugin::post_create(Flow& rec, const Packet& pkt) { return add_quic(rec, pkt); } -int QUICPlugin::pre_update(Flow& rec, Packet& pkt) -{ - return 0; -} - -int QUICPlugin::post_update(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction QUICPlugin::post_update(Flow& rec, const Packet& pkt) { return add_quic(rec, pkt); } -int QUICPlugin::add_quic(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction QUICPlugin::add_quic(Flow& rec, const Packet& pkt) { RecordExtQUIC* q_ptr = (RecordExtQUIC*) rec.get_extension(RecordExtQUIC::REGISTERED_ID); bool new_qptr = false; @@ -514,7 +505,7 @@ int QUICPlugin::add_quic(Flow& rec, const Packet& pkt) int ret = process_quic(q_ptr, rec, pkt, new_qptr); // Test if QUIC extension is not set - if (new_qptr && ((ret == QUIC_DETECTED) || (ret == FLOW_FLUSH))) { + if (new_qptr && ((ret == QUIC_DETECTED) || (ret == ProcessPlugin::FlowAction::FLUSH))) { rec.add_extension(q_ptr); } if (new_qptr && (ret == QUIC_NOT_DETECTED)) { @@ -523,9 +514,9 @@ int QUICPlugin::add_quic(Flow& rec, const Packet& pkt) } // Correct if QUIC has already been detected if (!new_qptr && (ret == QUIC_NOT_DETECTED)) { - return QUIC_DETECTED; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return ret; + return ProcessPlugin::FlowAction::NO_PROCESS; } void QUICPlugin::finish(bool print_stats) diff --git a/process/quic.hpp b/process/quic.hpp index 44db85515..b76836be3 100644 --- a/process/quic.hpp +++ b/process/quic.hpp @@ -374,11 +374,9 @@ class QUICPlugin : public ProcessPlugin { ProcessPlugin* copy(); - int pre_create(Packet& pkt); - int post_create(Flow& rec, const Packet& pkt); - int pre_update(Flow& rec, Packet& pkt); - int 
post_update(Flow& rec, const Packet& pkt); - int add_quic(Flow& rec, const Packet& pkt); + ProcessPlugin::FlowAction post_create(Flow& rec, const Packet& pkt); + ProcessPlugin::FlowAction post_update(Flow& rec, const Packet& pkt); + ProcessPlugin::FlowAction add_quic(Flow& rec, const Packet& pkt); void finish(bool print_stats); void set_packet_type(RecordExtQUIC* quic_data, Flow& rec, uint8_t packets); diff --git a/process/rtsp.cpp b/process/rtsp.cpp index f76119b7a..f7aef40be 100644 --- a/process/rtsp.cpp +++ b/process/rtsp.cpp @@ -94,19 +94,21 @@ ProcessPlugin *RTSPPlugin::copy() return new RTSPPlugin(*this); } -int RTSPPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction RTSPPlugin::post_create(Flow &rec, const Packet &pkt) { const char *payload = reinterpret_cast(pkt.payload); if (is_request(payload, pkt.payload_len)) { add_ext_rtsp_request(payload, pkt.payload_len, rec); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } else if (is_response(payload, pkt.payload_len)) { add_ext_rtsp_response(payload, pkt.payload_len, rec); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int RTSPPlugin::pre_update(Flow &rec, Packet &pkt) +ProcessPlugin::FlowAction RTSPPlugin::pre_update(Flow &rec, Packet &pkt) { RecordExt *ext = nullptr; const char *payload = reinterpret_cast(pkt.payload); @@ -114,29 +116,29 @@ int RTSPPlugin::pre_update(Flow &rec, Packet &pkt) ext = rec.get_extension(RecordExtRTSP::REGISTERED_ID); if (ext == nullptr) { /* Check if header is present in flow. 
*/ add_ext_rtsp_request(payload, pkt.payload_len, rec); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } parse_rtsp_request(payload, pkt.payload_len, static_cast(ext)); if (flow_flush) { flow_flush = false; - return FLOW_FLUSH_WITH_REINSERT; + return ProcessPlugin::FlowAction::FLUSH_WITH_REINSERT; } } else if (is_response(payload, pkt.payload_len)) { ext = rec.get_extension(RecordExtRTSP::REGISTERED_ID); if (ext == nullptr) { /* Check if header is present in flow. */ add_ext_rtsp_response(payload, pkt.payload_len, rec); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } parse_rtsp_response(payload, pkt.payload_len, static_cast(ext)); if (flow_flush) { flow_flush = false; - return FLOW_FLUSH_WITH_REINSERT; + return ProcessPlugin::FlowAction::FLUSH_WITH_REINSERT; } } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } void RTSPPlugin::finish(bool print_stats) diff --git a/process/rtsp.hpp b/process/rtsp.hpp index 943b6cf4f..01a125046 100644 --- a/process/rtsp.hpp +++ b/process/rtsp.hpp @@ -202,8 +202,8 @@ class RTSPPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtRTSP(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int pre_update(Flow &rec, Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction pre_update(Flow &rec, Packet &pkt); void finish(bool print_stats); private: diff --git a/process/sip.cpp b/process/sip.cpp index 67505a7f4..2a23ce2c1 100644 --- a/process/sip.cpp +++ b/process/sip.cpp @@ -69,13 +69,13 @@ ProcessPlugin *SIPPlugin::copy() return new SIPPlugin(*this); } -int SIPPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction SIPPlugin::post_create(Flow &rec, const Packet &pkt) { uint16_t msg_type; msg_type = parse_msg_type(pkt); if (msg_type == SIP_MSG_TYPE_INVALID) { - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } RecordExtSIP *sip_data = new RecordExtSIP(); @@ 
-83,19 +83,19 @@ int SIPPlugin::post_create(Flow &rec, const Packet &pkt) rec.add_extension(sip_data); parser_process_sip(pkt, sip_data); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int SIPPlugin::pre_update(Flow &rec, Packet &pkt) +ProcessPlugin::FlowAction SIPPlugin::pre_update(Flow &rec, Packet &pkt) { uint16_t msg_type; msg_type = parse_msg_type(pkt); if (msg_type != SIP_MSG_TYPE_INVALID) { - return FLOW_FLUSH_WITH_REINSERT; + return ProcessPlugin::FlowAction::FLUSH_WITH_REINSERT; } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } void SIPPlugin::finish(bool print_stats) diff --git a/process/sip.hpp b/process/sip.hpp index 7ab979121..9b1b797aa 100644 --- a/process/sip.hpp +++ b/process/sip.hpp @@ -496,8 +496,8 @@ class SIPPlugin : public ProcessPlugin { std::string get_name() const { return "sip"; } RecordExt *get_ext() const { return new RecordExtSIP(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int pre_update(Flow &rec, Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction pre_update(Flow &rec, Packet &pkt); void finish(bool print_stats); private: diff --git a/process/smtp.cpp b/process/smtp.cpp index fca627fc9..2aef0d138 100644 --- a/process/smtp.cpp +++ b/process/smtp.cpp @@ -66,27 +66,29 @@ ProcessPlugin *SMTPPlugin::copy() return new SMTPPlugin(*this); } -int SMTPPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction SMTPPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.src_port == 25 || pkt.dst_port == 25) { create_smtp_record(rec, pkt); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } -int SMTPPlugin::pre_update(Flow &rec, Packet &pkt) +ProcessPlugin::FlowAction SMTPPlugin::pre_update(Flow &rec, Packet &pkt) { if (pkt.src_port == 25 || pkt.dst_port == 25) { RecordExt *ext = rec.get_extension(RecordExtSMTP::REGISTERED_ID); if (ext == 
nullptr) { create_smtp_record(rec, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } update_smtp_record(static_cast(ext), pkt); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } char *strncasestr(const char *str, size_t n, const char *substr) diff --git a/process/smtp.hpp b/process/smtp.hpp index 6c16ee995..45fc9bc0d 100644 --- a/process/smtp.hpp +++ b/process/smtp.hpp @@ -241,8 +241,8 @@ class SMTPPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtSMTP(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int pre_update(Flow &rec, Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction pre_update(Flow &rec, Packet &pkt); void finish(bool print_stats); bool smtp_keyword(const char *data); diff --git a/process/ssadetector.cpp b/process/ssadetector.cpp index 1399bafe2..0361f4ee0 100644 --- a/process/ssadetector.cpp +++ b/process/ssadetector.cpp @@ -119,11 +119,11 @@ void SSADetectorPlugin::update_record(RecordExtSSADetector* record, const Packet } -int SSADetectorPlugin::post_update(Flow& rec, const Packet& pkt) +ProcessPlugin::FlowAction SSADetectorPlugin::post_update(Flow& rec, const Packet& pkt) { RecordExtSSADetector *record = nullptr; if (rec.src_packets + rec.dst_packets < MIN_PKT_IN_FLOW) { - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } record = (RecordExtSSADetector *) rec.get_extension(RecordExtSSADetector::REGISTERED_ID); @@ -133,7 +133,7 @@ int SSADetectorPlugin::post_update(Flow& rec, const Packet& pkt) } update_record(record, pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } double classes_ratio(uint8_t* syn_pkts, uint8_t size) diff --git a/process/ssadetector.hpp b/process/ssadetector.hpp index a83f783bd..64116462d 100644 --- a/process/ssadetector.hpp +++ b/process/ssadetector.hpp @@ -174,7 +174,7 @@ class SSADetectorPlugin : 
public ProcessPlugin { RecordExt* get_ext() const { return new RecordExtSSADetector(); } ProcessPlugin* copy(); - int post_update(Flow& rec, const Packet& pkt); + ProcessPlugin::FlowAction post_update(Flow& rec, const Packet& pkt); void pre_export(Flow& rec); void update_record(RecordExtSSADetector* record, const Packet& pkt); static inline void diff --git a/process/ssdp.cpp b/process/ssdp.cpp index 6ca74f323..386c0fe49 100644 --- a/process/ssdp.cpp +++ b/process/ssdp.cpp @@ -90,7 +90,7 @@ ProcessPlugin *SSDPPlugin::copy() return new SSDPPlugin(*this); } -int SSDPPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction SSDPPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.dst_port == 1900) { record = new RecordExtSSDP(); @@ -98,16 +98,18 @@ int SSDPPlugin::post_create(Flow &rec, const Packet &pkt) record = nullptr; parse_ssdp_message(rec, pkt); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } -int SSDPPlugin::pre_update(Flow &rec, Packet &pkt) +ProcessPlugin::FlowAction SSDPPlugin::pre_update(Flow &rec, Packet &pkt) { if (pkt.dst_port == 1900) { parse_ssdp_message(rec, pkt); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } void SSDPPlugin::finish(bool print_stats) diff --git a/process/ssdp.hpp b/process/ssdp.hpp index c95276c5a..504f35316 100644 --- a/process/ssdp.hpp +++ b/process/ssdp.hpp @@ -181,8 +181,8 @@ class SSDPPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtSSDP(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int pre_update(Flow &rec, Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction pre_update(Flow &rec, Packet &pkt); void finish(bool print_stats); /** diff --git a/process/stats.cpp b/process/stats.cpp index d6fb23da1..b86a86907 100644 --- a/process/stats.cpp +++ 
b/process/stats.cpp @@ -82,21 +82,21 @@ ProcessPlugin *StatsPlugin::copy() return new StatsPlugin(*this); } -int StatsPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction StatsPlugin::post_create(Flow &rec, const Packet &pkt) { m_packets += 1; m_new_flows += 1; m_flows_in_cache += 1; check_timestamp(pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } -int StatsPlugin::post_update(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction StatsPlugin::post_update(Flow &rec, const Packet &pkt) { m_packets += 1; m_cache_hits += 1; check_timestamp(pkt); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } void StatsPlugin::pre_export(Flow &rec) diff --git a/process/stats.hpp b/process/stats.hpp index b3ff2882b..3d884380b 100644 --- a/process/stats.hpp +++ b/process/stats.hpp @@ -71,8 +71,8 @@ class StatsPlugin : public ProcessPlugin std::string get_name() const { return "stats"; } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int post_update(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_update(Flow &rec, const Packet &pkt); void pre_export(Flow &rec); void finish(bool print_stats); diff --git a/process/tls.cpp b/process/tls.cpp index b47c0dfbc..3744f29fb 100644 --- a/process/tls.cpp +++ b/process/tls.cpp @@ -73,13 +73,12 @@ ProcessPlugin *TLSPlugin::copy() return new TLSPlugin(*this); } -int TLSPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction TLSPlugin::post_create(Flow &rec, const Packet &pkt) { - add_tls_record(rec, pkt); - return 0; + return add_tls_record(rec, pkt); } -int TLSPlugin::pre_update(Flow &rec, Packet &pkt) +ProcessPlugin::FlowAction TLSPlugin::pre_update(Flow &rec, Packet &pkt) { RecordExtTLS *ext = static_cast(rec.get_extension(RecordExtTLS::REGISTERED_ID)); @@ -88,11 +87,9 @@ int TLSPlugin::pre_update(Flow &rec, Packet &pkt) // Add ALPN from server packet 
parse_tls(pkt.payload, pkt.payload_len, ext); } - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - add_tls_record(rec, pkt); - - return 0; + return add_tls_record(rec, pkt); } bool TLSPlugin::obtain_tls_data(TLSData &payload, RecordExtTLS *rec, std::string &ja3, uint8_t hs_type) @@ -217,7 +214,7 @@ bool TLSPlugin::parse_tls(const uint8_t *data, uint16_t payload_len, RecordExtTL return payload.obejcts_parsed != 0 || !ja3.empty(); } // TLSPlugin::parse_sni -void TLSPlugin::add_tls_record(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction TLSPlugin::add_tls_record(Flow &rec, const Packet &pkt) { if (ext_ptr == nullptr) { ext_ptr = new RecordExtTLS(); @@ -234,6 +231,7 @@ void TLSPlugin::add_tls_record(Flow &rec, const Packet &pkt) rec.add_extension(ext_ptr); ext_ptr = nullptr; } + return ProcessPlugin::FlowAction::GET_ALL_DATA; } void TLSPlugin::finish(bool print_stats) diff --git a/process/tls.hpp b/process/tls.hpp index 3ab6e454b..dd4221dab 100644 --- a/process/tls.hpp +++ b/process/tls.hpp @@ -230,12 +230,12 @@ class TLSPlugin : public ProcessPlugin ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int pre_update(Flow &rec, Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction pre_update(Flow &rec, Packet &pkt); void finish(bool print_stats); private: - void add_tls_record(Flow&, const Packet&); + ProcessPlugin::FlowAction add_tls_record(Flow&, const Packet&); bool parse_tls(const uint8_t *, uint16_t, RecordExtTLS *); bool obtain_tls_data(TLSData&, RecordExtTLS *, std::string&, uint8_t); diff --git a/process/vlan.cpp b/process/vlan.cpp index 165347858..21f862324 100644 --- a/process/vlan.cpp +++ b/process/vlan.cpp @@ -46,12 +46,12 @@ ProcessPlugin *VLANPlugin::copy() return new VLANPlugin(*this); } -int VLANPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction VLANPlugin::post_create(Flow &rec, const Packet &pkt) { auto ext = new 
RecordExtVLAN(); ext->vlan_id = pkt.vlan_id; rec.add_extension(ext); - return 0; + return ProcessPlugin::FlowAction::GET_ALL_DATA; } } diff --git a/process/vlan.hpp b/process/vlan.hpp index a69eb5296..347c58c8b 100644 --- a/process/vlan.hpp +++ b/process/vlan.hpp @@ -117,7 +117,7 @@ class VLANPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtVLAN(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); }; } diff --git a/process/wg.cpp b/process/wg.cpp index 6d191fef8..bc76a70c8 100644 --- a/process/wg.cpp +++ b/process/wg.cpp @@ -70,16 +70,17 @@ ProcessPlugin *WGPlugin::copy() return new WGPlugin(*this); } -int WGPlugin::post_create(Flow &rec, const Packet &pkt) +ProcessPlugin::FlowAction WGPlugin::post_create(Flow &rec, const Packet &pkt) { if (pkt.ip_proto == IPPROTO_UDP) { add_ext_wg(reinterpret_cast(pkt.payload), pkt.payload_len, pkt.source_pkt, rec); + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } -int WGPlugin::pre_update(Flow &rec, Packet &pkt) +ProcessPlugin::FlowAction WGPlugin::pre_update(Flow &rec, Packet &pkt) { RecordExtWG *vpn_data = (RecordExtWG *) rec.get_extension(RecordExtWG::REGISTERED_ID); if (vpn_data != nullptr && vpn_data->possible_wg) { @@ -87,15 +88,16 @@ int WGPlugin::pre_update(Flow &rec, Packet &pkt) // In case of new flow, flush if (flow_flush) { flow_flush = false; - return FLOW_FLUSH_WITH_REINSERT; + return ProcessPlugin::FlowAction::FLUSH_WITH_REINSERT; } // In other cases, when WG was not detected if (!res) { vpn_data->possible_wg = 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } + return ProcessPlugin::FlowAction::GET_ALL_DATA; } - - return 0; + return ProcessPlugin::FlowAction::NO_PROCESS; } void WGPlugin::pre_export(Flow &rec) diff --git a/process/wg.hpp b/process/wg.hpp index 175ceffce..35552c277 100644 --- a/process/wg.hpp +++ 
b/process/wg.hpp @@ -158,8 +158,8 @@ class WGPlugin : public ProcessPlugin RecordExt *get_ext() const { return new RecordExtWG(); } ProcessPlugin *copy(); - int post_create(Flow &rec, const Packet &pkt); - int pre_update(Flow &rec, Packet &pkt); + ProcessPlugin::FlowAction post_create(Flow &rec, const Packet &pkt); + ProcessPlugin::FlowAction pre_update(Flow &rec, Packet &pkt); void pre_export(Flow &rec); void finish(bool print_stats); diff --git a/storage/cache.cpp b/storage/cache.cpp index 86a5b49b6..26f94fc91 100644 --- a/storage/cache.cpp +++ b/storage/cache.cpp @@ -29,15 +29,19 @@ * * */ +#include "cache.hpp" +#include #include #include #include +#include #include +#include -#include -#include "cache.hpp" #include "xxhash.h" +#include "fragmentationCache/timevalUtils.hpp" +#include "cacheRowSpan.hpp" namespace ipxp { @@ -47,132 +51,53 @@ __attribute__((constructor)) static void register_this_plugin() register_plugin(&rec); } -FlowRecord::FlowRecord() -{ - erase(); -}; - -FlowRecord::~FlowRecord() -{ - erase(); -}; - -void FlowRecord::erase() -{ - m_flow.remove_extensions(); - m_hash = 0; - - memset(&m_flow.time_first, 0, sizeof(m_flow.time_first)); - memset(&m_flow.time_last, 0, sizeof(m_flow.time_last)); - m_flow.ip_version = 0; - m_flow.ip_proto = 0; - memset(&m_flow.src_ip, 0, sizeof(m_flow.src_ip)); - memset(&m_flow.dst_ip, 0, sizeof(m_flow.dst_ip)); - m_flow.src_port = 0; - m_flow.dst_port = 0; - m_flow.src_packets = 0; - m_flow.dst_packets = 0; - m_flow.src_bytes = 0; - m_flow.dst_bytes = 0; - m_flow.src_tcp_flags = 0; - m_flow.dst_tcp_flags = 0; -} -void FlowRecord::reuse() -{ - m_flow.remove_extensions(); - m_flow.time_first = m_flow.time_last; - m_flow.src_packets = 0; - m_flow.dst_packets = 0; - m_flow.src_bytes = 0; - m_flow.dst_bytes = 0; - m_flow.src_tcp_flags = 0; - m_flow.dst_tcp_flags = 0; -} - -inline __attribute__((always_inline)) bool FlowRecord::is_empty() const +OptionsParser * NHTFlowCache::get_parser() const { - return m_hash == 0; + 
return new CacheOptParser(); } -inline __attribute__((always_inline)) bool FlowRecord::belongs(uint64_t hash) const +std::string NHTFlowCache::get_name() const noexcept { - return hash == m_hash; + return "cache"; } -void FlowRecord::create(const Packet &pkt, uint64_t hash) +NHTFlowCache::NHTFlowCache() : + m_cache_size(0), m_line_size(0), m_line_mask(0), m_new_flow_insert_index(0), + m_queue_size(0), m_active(0), m_inactive(0), + m_split_biflow(false), m_enable_fragmentation_cache(true), + m_fragmentation_cache(0, 0) { - m_flow.src_packets = 1; - - m_hash = hash; - - m_flow.time_first = pkt.ts; - m_flow.time_last = pkt.ts; - m_flow.flow_hash = hash; - - memcpy(m_flow.src_mac, pkt.src_mac, 6); - memcpy(m_flow.dst_mac, pkt.dst_mac, 6); - - if (pkt.ip_version == IP::v4) { - m_flow.ip_version = pkt.ip_version; - m_flow.ip_proto = pkt.ip_proto; - m_flow.src_ip.v4 = pkt.src_ip.v4; - m_flow.dst_ip.v4 = pkt.dst_ip.v4; - m_flow.src_bytes = pkt.ip_len; - } else if (pkt.ip_version == IP::v6) { - m_flow.ip_version = pkt.ip_version; - m_flow.ip_proto = pkt.ip_proto; - memcpy(m_flow.src_ip.v6, pkt.src_ip.v6, 16); - memcpy(m_flow.dst_ip.v6, pkt.dst_ip.v6, 16); - m_flow.src_bytes = pkt.ip_len; - } - - if (pkt.ip_proto == IPPROTO_TCP) { - m_flow.src_port = pkt.src_port; - m_flow.dst_port = pkt.dst_port; - m_flow.src_tcp_flags = pkt.tcp_flags; - } else if (pkt.ip_proto == IPPROTO_UDP) { - m_flow.src_port = pkt.src_port; - m_flow.dst_port = pkt.dst_port; - } else if (pkt.ip_proto == IPPROTO_ICMP || - pkt.ip_proto == IPPROTO_ICMPV6) { - m_flow.src_port = pkt.src_port; - m_flow.dst_port = pkt.dst_port; - } } -void FlowRecord::update(const Packet &pkt, bool src) +NHTFlowCache::~NHTFlowCache() { - m_flow.time_last = pkt.ts; - if (src) { - m_flow.src_packets++; - m_flow.src_bytes += pkt.ip_len; - - if (pkt.ip_proto == IPPROTO_TCP) { - m_flow.src_tcp_flags |= pkt.tcp_flags; - } - } else { - m_flow.dst_packets++; - m_flow.dst_bytes += pkt.ip_len; - - if (pkt.ip_proto == IPPROTO_TCP) { - 
m_flow.dst_tcp_flags |= pkt.tcp_flags; - } - } + NHTFlowCache::close(); + print_report(); } - -NHTFlowCache::NHTFlowCache() : - m_cache_size(0), m_line_size(0), m_line_mask(0), m_line_new_idx(0), - m_qsize(0), m_qidx(0), m_timeout_idx(0), m_active(0), m_inactive(0), - m_split_biflow(false), m_enable_fragmentation_cache(true), m_keylen(0), - m_key(), m_key_inv(), m_flow_table(nullptr), m_flow_records(nullptr), - m_fragmentation_cache(0, 0) +void NHTFlowCache::get_parser_options(CacheOptParser& parser) noexcept { + m_cache_size = parser.m_cache_size; + m_line_size = parser.m_line_size; + m_active = parser.m_active; + m_inactive = parser.m_inactive; + m_line_mask = (m_cache_size - 1) & ~(m_line_size - 1); + m_new_flow_insert_index = m_line_size / 2; + m_split_biflow = parser.m_split_biflow; + m_enable_fragmentation_cache = parser.m_enable_fragmentation_cache; } -NHTFlowCache::~NHTFlowCache() +void NHTFlowCache::allocate_table() { - close(); + try { + m_flow_table.resize(m_cache_size + m_queue_size); + m_flows.resize(m_cache_size + m_queue_size); + std::for_each(m_flow_table.begin(), m_flow_table.end(), [index = 0, this](FlowRecord*& flow) mutable { + flow = &m_flows[index++]; + }); + } catch (std::bad_alloc &e) { + throw PluginError("not enough memory for flow cache allocation"); + } } void NHTFlowCache::init(const char *params) @@ -184,38 +109,17 @@ void NHTFlowCache::init(const char *params) throw PluginError(e.what()); } - m_cache_size = parser.m_cache_size; - m_line_size = parser.m_line_size; - m_active = parser.m_active; - m_inactive = parser.m_inactive; - m_qidx = 0; - m_timeout_idx = 0; - m_line_mask = (m_cache_size - 1) & ~(m_line_size - 1); - m_line_new_idx = m_line_size / 2; - + get_parser_options(parser); if (m_export_queue == nullptr) { throw PluginError("output queue must be set before init"); } - if (m_line_size > m_cache_size) { throw PluginError("flow cache line size must be greater or equal to cache size"); } if (m_cache_size == 0) { throw 
PluginError("flow cache won't properly work with 0 records"); } - - try { - m_flow_table = new FlowRecord*[m_cache_size + m_qsize]; - m_flow_records = new FlowRecord[m_cache_size + m_qsize]; - for (decltype(m_cache_size + m_qsize) i = 0; i < m_cache_size + m_qsize; i++) { - m_flow_table[i] = m_flow_records + i; - } - } catch (std::bad_alloc &e) { - throw PluginError("not enough memory for flow cache allocation"); - } - - m_split_biflow = parser.m_split_biflow; - m_enable_fragmentation_cache = parser.m_enable_fragmentation_cache; + allocate_table(); if (m_enable_fragmentation_cache) { try { @@ -224,358 +128,444 @@ void NHTFlowCache::init(const char *params) throw PluginError("not enough memory for fragment cache allocation"); } } - -#ifdef FLOW_CACHE_STATS - m_empty = 0; - m_not_empty = 0; - m_hits = 0; - m_expired = 0; - m_flushed = 0; - m_lookups = 0; - m_lookups2 = 0; -#endif /* FLOW_CACHE_STATS */ +#ifdef WITH_CTT + if (m_ctt_device.empty()) { + throw PluginError("CTT device must be set before init"); + } + m_ctt_controller.init(m_ctt_device, m_ctt_comp_index); +#endif /* WITH_CTT */ } void NHTFlowCache::close() { - if (m_flow_records != nullptr) { - delete [] m_flow_records; - m_flow_records = nullptr; - } - if (m_flow_table != nullptr) { - delete [] m_flow_table; - m_flow_table = nullptr; - } + m_flows.clear(); + m_flow_table.clear(); } void NHTFlowCache::set_queue(ipx_ring_t *queue) { m_export_queue = queue; - m_qsize = ipx_ring_size(queue); + m_queue_size = ipx_ring_size(queue); +} + +void NHTFlowCache::export_flow(size_t flow_index) +{ + export_flow(flow_index, get_export_reason(m_flow_table[flow_index]->m_flow)); +} + +void NHTFlowCache::export_flow(size_t flow_index, int reason) +{ + m_flow_table[flow_index]->m_flow.end_reason = reason; + update_flow_record_stats(m_flow_table[flow_index]->m_flow.src_packets + m_flow_table[flow_index]->m_flow.dst_packets); + update_flow_end_reason_stats(m_flow_table[flow_index]->m_flow.end_reason); + 
m_cache_stats.exported++; + push_to_export_queue(flow_index); + m_flow_table[flow_index]->erase(); + m_cache_stats.flows_in_cache--; + m_cache_stats.total_exported++; } -void NHTFlowCache::export_flow(size_t index) +void NHTFlowCache::push_to_export_queue(size_t flow_index) noexcept { - m_total_exported++; - update_flow_end_reason_stats(m_flow_table[index]->m_flow.end_reason); - update_flow_record_stats( - m_flow_table[index]->m_flow.src_packets - + m_flow_table[index]->m_flow.dst_packets); - m_flows_in_cache--; - - ipx_ring_push(m_export_queue, &m_flow_table[index]->m_flow); - std::swap(m_flow_table[index], m_flow_table[m_cache_size + m_qidx]); - m_flow_table[index]->erase(); - m_qidx = (m_qidx + 1) % m_qsize; + ipx_ring_push(m_export_queue, &m_flow_table[flow_index]->m_flow); + std::swap(m_flow_table[flow_index], m_flow_table[m_cache_size + m_queue_index]); + m_queue_index = (m_queue_index + 1) % m_queue_size; } void NHTFlowCache::finish() { + /*auto it = std::find_if(m_hashes_in_ctt.begin(), m_hashes_in_ctt.end(), [](const auto& pair) { + return pair.second <= 0; + });*/ for (decltype(m_cache_size) i = 0; i < m_cache_size; i++) { if (!m_flow_table[i]->is_empty()) { +#ifdef WITH_CTT + if (m_flow_table[i]->is_in_ctt && !m_flow_table[i]->is_waiting_for_export) { + send_export_request_to_ctt(m_flow_table[i]->m_flow.flow_hash_ctt); + } +#endif /* WITH_CTT */ plugins_pre_export(m_flow_table[i]->m_flow); - m_flow_table[i]->m_flow.end_reason = FLOW_END_FORCED; - export_flow(i); -#ifdef FLOW_CACHE_STATS - m_expired++; -#endif /* FLOW_CACHE_STATS */ + export_flow(i, FLOW_END_FORCED); } } + /*if (m_hashes_in_ctt.size() > 0){ + throw "bad CTT size"; + } + std::cout << "CTT hash collisions: " << m_ctt_hash_collision << std::endl;*/ } -void NHTFlowCache::flush(Packet &pkt, size_t flow_index, int ret, bool source_flow) +void NHTFlowCache::flush(Packet &pkt, size_t flow_index, int return_flags) { -#ifdef FLOW_CACHE_STATS - m_flushed++; -#endif /* FLOW_CACHE_STATS */ + 
m_cache_stats.flushed++; - if (ret == FLOW_FLUSH_WITH_REINSERT) { - FlowRecord *flow = m_flow_table[flow_index]; - flow->m_flow.end_reason = FLOW_END_FORCED; - ipx_ring_push(m_export_queue, &flow->m_flow); + if (return_flags == ProcessPlugin::FlowAction::FLUSH_WITH_REINSERT) { +#ifdef WITH_CTT + if (m_flow_table[flow_index]->is_in_ctt && !m_flow_table[flow_index]->is_waiting_for_export) { + m_flow_table[flow_index]->is_waiting_for_export = true; + send_export_request_to_ctt(m_flow_table[flow_index]->m_flow.flow_hash_ctt); + } +#endif /* WITH_CTT */ + push_to_export_queue(flow_index); + m_flow_table[flow_index]->m_flow.remove_extensions(); + *m_flow_table[flow_index] = *m_flow_table[m_cache_size + m_queue_index]; + m_flow_table[flow_index]->m_flow.m_exts = nullptr; + m_flow_table[flow_index]->reuse(); // Clean counters, set time first to last + m_flow_table[flow_index]->update(pkt); // Set new counters from packet + + const size_t post_create_return_flags = plugins_post_create(m_flow_table[flow_index]->m_flow, pkt); + if (post_create_return_flags & ProcessPlugin::FlowAction::FLUSH) { + flush(pkt, flow_index, post_create_return_flags); + } + return; + } + try_to_export(flow_index, false, pkt.ts, FLOW_END_FORCED); +} - std::swap(m_flow_table[flow_index], m_flow_table[m_cache_size + m_qidx]); +std::tuple, std::optional, bool> NHTFlowCache::find_flow_index(const Packet& packet) noexcept +{ + if (!create_hash_key(packet)) { + return {std::nullopt, std::nullopt, false}; + } - flow = m_flow_table[flow_index]; - flow->m_flow.remove_extensions(); - *flow = *m_flow_table[m_cache_size + m_qidx]; - m_qidx = (m_qidx + 1) % m_qsize; + const auto key_hasher = [](const auto& key) + { + return XXH64(&key, sizeof(key), 0); + }; + + const size_t direct_hash_value = std::visit(key_hasher, m_key); + const size_t first_flow_in_raw = direct_hash_value & m_line_mask; + const CacheRowSpan raw_span_direct(&m_flow_table[first_flow_in_raw], m_line_size); + std::optional flow_index = 
raw_span_direct.find_by_hash(direct_hash_value); + if (flow_index.has_value()) { + return {direct_hash_value, flow_index.value(), true}; + } - flow->m_flow.m_exts = nullptr; - flow->reuse(); // Clean counters, set time first to last - flow->update(pkt, source_flow); // Set new counters from packet + const size_t reversed_hash_value = std::visit(key_hasher, m_key_reversed); + const size_t first_flow_in_raw_reversed = reversed_hash_value & m_line_mask; + const CacheRowSpan raw_span_reverse(&m_flow_table[first_flow_in_raw_reversed], m_line_size); + flow_index = raw_span_reverse.find_by_hash(reversed_hash_value); + if (flow_index.has_value()) { + return {reversed_hash_value, flow_index.value(), false}; + } - ret = plugins_post_create(flow->m_flow, pkt); - if (ret & FLOW_FLUSH) { - flush(pkt, flow_index, ret, source_flow); - } - } else { - m_flow_table[flow_index]->m_flow.end_reason = FLOW_END_FORCED; + return {direct_hash_value, std::nullopt, true}; +} + +static bool is_tcp_connection_restart(const Packet& packet, const Flow& flow) noexcept +{ + constexpr uint8_t TCP_FIN = 0x01; + constexpr uint8_t TCP_RST = 0x04; + constexpr uint8_t TCP_SYN = 0x02; + const uint8_t flags = packet.source_pkt ? 
flow.src_tcp_flags : flow.dst_tcp_flags; + return packet.tcp_flags & TCP_SYN && (flags & (TCP_FIN | TCP_RST)); +} + +bool NHTFlowCache::try_to_export_on_inactive_timeout(size_t flow_index, const timeval& now) noexcept +{ + if (!m_flow_table[flow_index]->is_empty() && now.tv_sec - m_flow_table[flow_index]->m_flow.time_last.tv_sec >= m_inactive) { + return try_to_export(flow_index, false, now); + } + return false; +} + +bool NHTFlowCache::needs_to_be_offloaded(size_t flow_index) const noexcept +{ + return only_metadata_required(m_flow_table[flow_index]->m_flow) && m_flow_table[flow_index]->m_flow.src_packets + m_flow_table[flow_index]->m_flow.dst_packets > 30; +} + + +void NHTFlowCache::create_record(const Packet& packet, size_t flow_index, size_t hash_value) noexcept +{ + m_cache_stats.flows_in_cache++; + m_flow_table[flow_index]->create(packet, hash_value); + const size_t post_create_return_flags = plugins_post_create(m_flow_table[flow_index]->m_flow, packet); + if (post_create_return_flags & ProcessPlugin::FlowAction::FLUSH) { export_flow(flow_index); + m_cache_stats.flushed++; + return; + } +#ifdef WITH_CTT + // if metadata are valid, add flow hash ctt to the flow record + if (!packet.cttmeta_valid) { + return; } + m_flow_table[flow_index]->m_flow.flow_hash_ctt = packet.cttmeta.flow_hash; + if (needs_to_be_offloaded(flow_index)) { + /*m_hashes_in_ctt[m_flow_table[flow_index]->m_flow.flow_hash_ctt]++; + if (m_hashes_in_ctt[m_flow_table[flow_index]->m_flow.flow_hash_ctt] >= 2) { + m_ctt_hash_collision++; + std::vector filtered; + + std::copy_if(m_flow_table.begin(), m_flow_table.end(), std::back_inserter(filtered), + [&](FlowRecord* flow) { return flow->m_flow.flow_hash_ctt == m_flow_table[flow_index]->m_flow.flow_hash_ctt; }); + filtered.size(); + } + auto x = m_hashes_in_ctt[m_flow_table[flow_index]->m_flow.flow_hash_ctt];*/ + m_ctt_controller.create_record(m_flow_table[flow_index]->m_flow.flow_hash_ctt, m_flow_table[flow_index]->m_flow.time_first); + 
m_cache_stats.ctt_offloaded++; + m_flow_table[flow_index]->is_in_ctt = true; + } +#endif /* WITH_CTT */ } -int NHTFlowCache::put_pkt(Packet &pkt) +#ifdef WITH_CTT +void NHTFlowCache::try_to_add_flow_to_ctt(size_t flow_index) noexcept { - int ret = plugins_pre_create(pkt); + if (m_flow_table[flow_index]->is_in_ctt || m_flow_table[flow_index]->m_flow.flow_hash_ctt == 0) { + return; + } + if (needs_to_be_offloaded(flow_index)) { + /*m_hashes_in_ctt[m_flow_table[flow_index]->m_flow.flow_hash_ctt]++; + auto x = m_hashes_in_ctt[m_flow_table[flow_index]->m_flow.flow_hash_ctt]; + if (m_hashes_in_ctt[m_flow_table[flow_index]->m_flow.flow_hash_ctt] >= 2) { + m_ctt_hash_collision++; + std::vector filtered; + + std::copy_if(m_flow_table.begin(), m_flow_table.end(), std::back_inserter(filtered), + [&](FlowRecord* flow) { return flow->m_flow.flow_hash_ctt == m_flow_table[flow_index]->m_flow.flow_hash_ctt; }); + filtered.size(); + }*/ + m_ctt_controller.create_record(m_flow_table[flow_index]->m_flow.flow_hash_ctt, m_flow_table[flow_index]->m_flow.time_first); + m_cache_stats.ctt_offloaded++; + m_flow_table[flow_index]->is_in_ctt = true; + } +} +#endif /* WITH_CTT */ - if (m_enable_fragmentation_cache) { - try_to_fill_ports_to_fragmented_packet(pkt); +int NHTFlowCache::process_flow(Packet& packet, size_t flow_index, bool flow_is_waiting_for_export) noexcept +{ + if (is_tcp_connection_restart(packet, m_flow_table[flow_index]->m_flow) && !flow_is_waiting_for_export) { + if (try_to_export(flow_index, false, packet.ts, FLOW_END_EOF)) { + put_pkt(packet); + return 0; + } + } + + /* Check if flow record is expired (inactive timeout). 
*/ + if (!flow_is_waiting_for_export + && try_to_export_on_inactive_timeout(flow_index, packet.ts)) { + return put_pkt(packet); + } + + if (!flow_is_waiting_for_export + && try_to_export_on_active_timeout(flow_index, packet.ts)) { + return put_pkt(packet); } - if (!create_hash_key(pkt)) { // saves key value and key length into attributes NHTFlowCache::key and NHTFlowCache::m_keylen + const size_t pre_update_return_flags = plugins_pre_update(m_flow_table[flow_index]->m_flow, packet); + if ((pre_update_return_flags & ProcessPlugin::FlowAction::FLUSH) + && !flow_is_waiting_for_export) { + flush(packet, flow_index, pre_update_return_flags); return 0; } - prefetch_export_expired(); + m_flow_table[flow_index]->update(packet); +#ifdef WITH_CTT + try_to_add_flow_to_ctt(flow_index); +#endif /* WITH_CTT */ + const size_t post_update_return_flags = plugins_post_update(m_flow_table[flow_index]->m_flow, packet); + if ((post_update_return_flags & ProcessPlugin::FlowAction::FLUSH) + && !flow_is_waiting_for_export) { + flush(packet, flow_index, post_update_return_flags); + return 0; + } - uint64_t hashval = XXH64(m_key, m_keylen, 0); /* Calculates hash value from key created before. */ + export_expired(packet.ts); + return 0; +} +#ifdef WITH_CTT +bool NHTFlowCache::try_to_export_delayed_flow(const Packet& packet, size_t flow_index) noexcept +{ + if (!m_flow_table[flow_index]->is_in_ctt) { + return false; + } + if (m_flow_table[flow_index]->is_waiting_for_export && + ((packet.cttmeta_valid && !packet.cttmeta.ctt_rec_matched) || packet.ts > m_flow_table[flow_index]->export_time)) { + plugins_pre_export(m_flow_table[flow_index]->m_flow); + export_flow(flow_index); + return false; + } + return m_flow_table[flow_index]->is_waiting_for_export; +} +#endif /* WITH_CTT */ - FlowRecord *flow; /* Pointer to flow we will be working with. */ - bool found = false; - bool source_flow = true; - uint32_t line_index = hashval & m_line_mask; /* Get index of flow line. 
*/ - uint32_t flow_index = 0; - uint32_t next_line = line_index + m_line_size; +bool NHTFlowCache::try_to_export(size_t flow_index, bool call_pre_export, const timeval& now) noexcept +{ + return try_to_export(flow_index, call_pre_export, now, get_export_reason(m_flow_table[flow_index]->m_flow)); +} - /* Find existing flow record in flow cache. */ - for (flow_index = line_index; flow_index < next_line; flow_index++) { - if (m_flow_table[flow_index]->belongs(hashval)) { - found = true; - break; - } +#ifdef WITH_CTT +void NHTFlowCache::send_export_request_to_ctt(size_t ctt_flow_hash) noexcept +{ + /*if (--m_hashes_in_ctt[ctt_flow_hash] < 0) + { + throw "missing hash in send_export_request_to_ctt!"; } + if (m_hashes_in_ctt[ctt_flow_hash] == 0) { + m_hashes_in_ctt.erase(ctt_flow_hash); + }*/ + m_ctt_controller.export_record(ctt_flow_hash); +} +#endif /* WITH_CTT */ - /* Find inversed flow. */ - if (!found && !m_split_biflow) { - uint64_t hashval_inv = XXH64(m_key_inv, m_keylen, 0); - uint64_t line_index_inv = hashval_inv & m_line_mask; - uint64_t next_line_inv = line_index_inv + m_line_size; - for (flow_index = line_index_inv; flow_index < next_line_inv; flow_index++) { - if (m_flow_table[flow_index]->belongs(hashval_inv)) { - found = true; - source_flow = false; - hashval = hashval_inv; - line_index = line_index_inv; - break; - } +bool NHTFlowCache::try_to_export(size_t flow_index, bool call_pre_export, const timeval& now, int reason) noexcept +{ +#ifdef WITH_CTT + if (m_flow_table[flow_index]->is_in_ctt) { + if (!m_flow_table[flow_index]->is_waiting_for_export) { + m_flow_table[flow_index]->is_waiting_for_export = true; + send_export_request_to_ctt(m_flow_table[flow_index]->m_flow.flow_hash_ctt); + m_flow_table[flow_index]->export_time = {now.tv_sec + 1, now.tv_usec}; + return false; + } + if (m_flow_table[flow_index]->export_time > now) { + return false; } + m_flow_table[flow_index]->is_waiting_for_export = false; } +#endif /* WITH_CTT */ + if (call_pre_export) { + 
plugins_pre_export(m_flow_table[flow_index]->m_flow); + } + export_flow(flow_index, reason); + return true; +} - if (found) { - /* Existing flow record was found, put flow record at the first index of flow line. */ -#ifdef FLOW_CACHE_STATS - m_lookups += (flow_index - line_index + 1); - m_lookups2 += (flow_index - line_index + 1) * (flow_index - line_index + 1); -#endif /* FLOW_CACHE_STATS */ - - flow = m_flow_table[flow_index]; - for (decltype(flow_index) j = flow_index; j > line_index; j--) { - m_flow_table[j] = m_flow_table[j - 1]; - } +int NHTFlowCache::put_pkt(Packet &pkt) +{ + plugins_pre_create(pkt); - m_flow_table[line_index] = flow; - flow_index = line_index; -#ifdef FLOW_CACHE_STATS - m_hits++; -#endif /* FLOW_CACHE_STATS */ - } else { - /* Existing flow record was not found. Find free place in flow line. */ - for (flow_index = line_index; flow_index < next_line; flow_index++) { - if (m_flow_table[flow_index]->is_empty()) { - found = true; - break; - } - } - if (!found) { - /* If free place was not found (flow line is full), find - * record which will be replaced by new record. */ - flow_index = next_line - 1; - - // Export flow - plugins_pre_export(m_flow_table[flow_index]->m_flow); - m_flow_table[flow_index]->m_flow.end_reason = FLOW_END_NO_RES; - export_flow(flow_index); - -#ifdef FLOW_CACHE_STATS - m_expired++; -#endif /* FLOW_CACHE_STATS */ - uint32_t flow_new_index = line_index + m_line_new_idx; - flow = m_flow_table[flow_index]; - for (decltype(flow_index) j = flow_index; j > flow_new_index; j--) { - m_flow_table[j] = m_flow_table[j - 1]; - } - flow_index = flow_new_index; - m_flow_table[flow_new_index] = flow; -#ifdef FLOW_CACHE_STATS - m_not_empty++; - } else { - m_empty++; -#endif /* FLOW_CACHE_STATS */ - } + if (m_enable_fragmentation_cache) { + try_to_fill_ports_to_fragmented_packet(pkt); } - pkt.source_pkt = source_flow; - flow = m_flow_table[flow_index]; + prefetch_export_expired(); - uint8_t flw_flags = source_flow ? 
flow->m_flow.src_tcp_flags : flow->m_flow.dst_tcp_flags; - if ((pkt.tcp_flags & 0x02) && (flw_flags & (0x01 | 0x04))) { - // Flows with FIN or RST TCP flags are exported when new SYN packet arrives - m_flow_table[flow_index]->m_flow.end_reason = FLOW_END_EOF; - export_flow(flow_index); - put_pkt(pkt); + auto [hash_value, flow_index, source_to_destination] = find_flow_index(pkt); + pkt.source_pkt = source_to_destination; + const bool hash_created = hash_value.has_value(); + const bool flow_found = flow_index.has_value(); + if (!hash_created) { return 0; } + const size_t row_begin = hash_value.value() & m_line_mask; + CacheRowSpan row_span(&m_flow_table[row_begin], m_line_size); - if (flow->is_empty()) { - m_flows_in_cache++; - flow->create(pkt, hashval); - ret = plugins_post_create(flow->m_flow, pkt); +#ifdef WITH_CTT + const bool flow_is_waiting_for_export = flow_found && try_to_export_delayed_flow(pkt, flow_index.value() + row_begin); +#else + constexpr bool flow_is_waiting_for_export = false; +#endif /* WITH_CTT */ - if (ret & FLOW_FLUSH) { - export_flow(flow_index); -#ifdef FLOW_CACHE_STATS - m_flushed++; -#endif /* FLOW_CACHE_STATS */ - } - } else { - /* Check if flow record is expired (inactive timeout). */ - if (pkt.ts.tv_sec - flow->m_flow.time_last.tv_sec >= m_inactive) { - m_flow_table[flow_index]->m_flow.end_reason = get_export_reason(flow->m_flow); - plugins_pre_export(flow->m_flow); - export_flow(flow_index); - #ifdef FLOW_CACHE_STATS - m_expired++; - #endif /* FLOW_CACHE_STATS */ - return put_pkt(pkt); - } + if (flow_found && !m_flow_table[flow_index.value() + row_begin]->is_empty()) { + /* Existing flow record was found, put flow record at the first index of flow line. */ + m_cache_stats.lookups += flow_index.value() + 1; + m_cache_stats.lookups2 += (flow_index.value() + 1) * (flow_index.value() + 1); + m_cache_stats.hits++; - /* Check if flow record is expired (active timeout). 
*/ - if (pkt.ts.tv_sec - flow->m_flow.time_first.tv_sec >= m_active) { - m_flow_table[flow_index]->m_flow.end_reason = FLOW_END_ACTIVE; - plugins_pre_export(flow->m_flow); - export_flow(flow_index); -#ifdef FLOW_CACHE_STATS - m_expired++; -#endif /* FLOW_CACHE_STATS */ - return put_pkt(pkt); + row_span.advance_flow(flow_index.value()); + flow_index = row_begin; + return process_flow(pkt, flow_index.value(), flow_is_waiting_for_export); + } + /* Existing flow record was not found. Find free place in flow line. */ + const std::optional empty_index = flow_found && m_flow_table[flow_index.value() + row_begin]->is_empty() + ? flow_index.value() + : row_span.find_empty(); + const bool empty_found = empty_index.has_value(); + if (empty_found) { + flow_index = empty_index.value() + row_begin; + m_cache_stats.empty++; + } else { +#ifdef WITH_CTT + const size_t victim_index = row_span.find_victim(pkt.ts); +#else + const size_t victim_index = m_line_size - 1; +#endif /* WITH_CTT */ + row_span.advance_flow_to(victim_index, m_new_flow_insert_index); + flow_index = row_begin + m_new_flow_insert_index; +#ifdef WITH_CTT + if (m_flow_table[flow_index.value()]->is_in_ctt && !m_flow_table[flow_index.value()]->is_waiting_for_export) { + m_flow_table[flow_index.value()]->is_waiting_for_export = true; + send_export_request_to_ctt(m_flow_table[flow_index.value()]->m_flow.flow_hash_ctt); + m_flow_table[flow_index.value()]->export_time = {pkt.ts.tv_sec + 1, pkt.ts.tv_usec}; } +#endif /* WITH_CTT */ + plugins_pre_export(m_flow_table[flow_index.value()]->m_flow); + export_flow(flow_index.value(), FLOW_END_NO_RES); - ret = plugins_pre_update(flow->m_flow, pkt); - if (ret & FLOW_FLUSH) { - flush(pkt, flow_index, ret, source_flow); - return 0; - } else { - flow->update(pkt, source_flow); - ret = plugins_post_update(flow->m_flow, pkt); - - if (ret & FLOW_FLUSH) { - flush(pkt, flow_index, ret, source_flow); - return 0; - } - } + m_cache_stats.not_empty++; } - - export_expired(pkt.ts.tv_sec); + 
create_record(pkt, flow_index.value(), hash_value.value()); + export_expired(pkt.ts); return 0; } +bool NHTFlowCache::try_to_export_on_active_timeout(size_t flow_index, const timeval& now) noexcept +{ + if (!m_flow_table[flow_index]->is_empty() && now.tv_sec - m_flow_table[flow_index]->m_flow.time_first.tv_sec >= m_active) { + return try_to_export(flow_index, true, now, FLOW_END_ACTIVE); + } + return false; +} + void NHTFlowCache::try_to_fill_ports_to_fragmented_packet(Packet& packet) { m_fragmentation_cache.process_packet(packet); } -uint8_t NHTFlowCache::get_export_reason(Flow &flow) +uint8_t NHTFlowCache::get_export_reason(const Flow& flow) { - if ((flow.src_tcp_flags | flow.dst_tcp_flags) & (0x01 | 0x04)) { + constexpr uint8_t TCP_FIN = 0x01; + constexpr uint8_t TCP_RST = 0x04; + if ((flow.src_tcp_flags | flow.dst_tcp_flags) & (TCP_FIN | TCP_RST)) { // When FIN or RST is set, TCP connection ended naturally return FLOW_END_EOF; - } else { - return FLOW_END_INACTIVE; } + return FLOW_END_INACTIVE; } -void NHTFlowCache::export_expired(time_t ts) +void NHTFlowCache::export_expired(time_t now) { - for (decltype(m_timeout_idx) i = m_timeout_idx; i < m_timeout_idx + m_line_new_idx; i++) { - if (!m_flow_table[i]->is_empty() && ts - m_flow_table[i]->m_flow.time_last.tv_sec >= m_inactive) { - m_flow_table[i]->m_flow.end_reason = get_export_reason(m_flow_table[i]->m_flow); - plugins_pre_export(m_flow_table[i]->m_flow); - export_flow(i); -#ifdef FLOW_CACHE_STATS - m_expired++; -#endif /* FLOW_CACHE_STATS */ - } - } - - m_timeout_idx = (m_timeout_idx + m_line_new_idx) & (m_cache_size - 1); + export_expired({now, 0}); } -bool NHTFlowCache::create_hash_key(Packet &pkt) +void NHTFlowCache::export_expired(const timeval& now) { - if (pkt.ip_version == IP::v4) { - struct flow_key_v4_t *key_v4 = reinterpret_cast(m_key); - struct flow_key_v4_t *key_v4_inv = reinterpret_cast(m_key_inv); - - key_v4->proto = pkt.ip_proto; - key_v4->ip_version = IP::v4; - key_v4->src_port = 
pkt.src_port; - key_v4->dst_port = pkt.dst_port; - key_v4->src_ip = pkt.src_ip.v4; - key_v4->dst_ip = pkt.dst_ip.v4; - key_v4->vlan_id = pkt.vlan_id; - - key_v4_inv->proto = pkt.ip_proto; - key_v4_inv->ip_version = IP::v4; - key_v4_inv->src_port = pkt.dst_port; - key_v4_inv->dst_port = pkt.src_port; - key_v4_inv->src_ip = pkt.dst_ip.v4; - key_v4_inv->dst_ip = pkt.src_ip.v4; - key_v4_inv->vlan_id = pkt.vlan_id; + for (size_t i = m_last_exported_on_timeout_index; i < m_last_exported_on_timeout_index + m_new_flow_insert_index; i++) { + try_to_export_on_inactive_timeout(i, now); + } + m_last_exported_on_timeout_index = (m_last_exported_on_timeout_index + m_new_flow_insert_index) & (m_cache_size - 1); +} - m_keylen = sizeof(flow_key_v4_t); +bool NHTFlowCache::create_hash_key(const Packet& packet) +{ + if (packet.ip_version == IP::v4) { + m_key = FlowKeyv4::save_direct(packet); + m_key_reversed = FlowKeyv4::save_reversed(packet); return true; - } else if (pkt.ip_version == IP::v6) { - struct flow_key_v6_t *key_v6 = reinterpret_cast(m_key); - struct flow_key_v6_t *key_v6_inv = reinterpret_cast(m_key_inv); - - key_v6->proto = pkt.ip_proto; - key_v6->ip_version = IP::v6; - key_v6->src_port = pkt.src_port; - key_v6->dst_port = pkt.dst_port; - memcpy(key_v6->src_ip, pkt.src_ip.v6, sizeof(pkt.src_ip.v6)); - memcpy(key_v6->dst_ip, pkt.dst_ip.v6, sizeof(pkt.dst_ip.v6)); - key_v6->vlan_id = pkt.vlan_id; - - key_v6_inv->proto = pkt.ip_proto; - key_v6_inv->ip_version = IP::v6; - key_v6_inv->src_port = pkt.dst_port; - key_v6_inv->dst_port = pkt.src_port; - memcpy(key_v6_inv->src_ip, pkt.dst_ip.v6, sizeof(pkt.dst_ip.v6)); - memcpy(key_v6_inv->dst_ip, pkt.src_ip.v6, sizeof(pkt.src_ip.v6)); - key_v6_inv->vlan_id = pkt.vlan_id; - - m_keylen = sizeof(flow_key_v6_t); + } else if (packet.ip_version == IP::v6) { + m_key = FlowKeyv6::save_direct(packet); + m_key_reversed = FlowKeyv6::save_reversed(packet); return true; } - return false; } -#ifdef FLOW_CACHE_STATS -void 
NHTFlowCache::print_report() +void NHTFlowCache::print_report() const { - float tmp = float(m_lookups) / m_hits; - - cout << "Hits: " << m_hits << endl; - cout << "Empty: " << m_empty << endl; - cout << "Not empty: " << m_not_empty << endl; - cout << "Expired: " << m_expired << endl; - cout << "Flushed: " << m_flushed << endl; - cout << "Average Lookup: " << tmp << endl; - cout << "Variance Lookup: " << float(m_lookups2) / m_hits - tmp * tmp << endl; + const float tmp = static_cast(m_cache_stats.lookups) / m_cache_stats.hits; + + std::cout << "Hits: " << m_cache_stats.hits << std::endl; + std::cout << "Empty: " << m_cache_stats.empty << std::endl; + std::cout << "Not empty: " << m_cache_stats.not_empty << std::endl; + std::cout << "Expired: " << m_cache_stats.exported << std::endl; + std::cout << "Flushed: " << m_cache_stats.flushed << std::endl; + std::cout << "Average Lookup: " << tmp << std::endl; + std::cout << "Variance Lookup: " << static_cast(m_cache_stats.lookups2) / m_cache_stats.hits - tmp * tmp << std::endl; +#ifdef WITH_CTT + std::cout << "CTT offloaded: " << m_cache_stats.ctt_offloaded << std::endl; +#endif /* WITH_CTT */ } -#endif /* FLOW_CACHE_STATS */ void NHTFlowCache::set_telemetry_dir(std::shared_ptr dir) { @@ -637,8 +627,8 @@ telemetry::Content NHTFlowCache::get_cache_telemetry() dict["FlowEndReason:Collision"] = m_flow_end_reason_stats.collision; dict["FlowEndReason:Forced"] = m_flow_end_reason_stats.forced; - dict["FlowsInCache"] = m_flows_in_cache; - dict["FlowCacheUsage"] = telemetry::ScalarWithUnit {double(m_flows_in_cache) / m_cache_size * 100, "%"}; + dict["FlowsInCache"] = m_cache_stats.flows_in_cache; + dict["FlowCacheUsage"] = telemetry::ScalarWithUnit {double(m_cache_stats.flows_in_cache) / m_cache_size * 100, "%"}; dict["FlowRecordStats:1packet"] = m_flow_record_stats.packets_count_1; dict["FlowRecordStats:2-5packets"] = m_flow_record_stats.packets_count_2_5; @@ -647,15 +637,23 @@ telemetry::Content 
NHTFlowCache::get_cache_telemetry() dict["FlowRecordStats:21-50packets"] = m_flow_record_stats.packets_count_21_50; dict["FlowRecordStats:51-plusPackets"] = m_flow_record_stats.packets_count_51_plus; - dict["TotalExportedFlows"] = m_total_exported; + dict["TotalExportedFlows"] = m_cache_stats.total_exported; return dict; } void NHTFlowCache::prefetch_export_expired() const { - for (decltype(m_timeout_idx) i = m_timeout_idx; i < m_timeout_idx + m_line_new_idx; i++) { + for (decltype(m_last_exported_on_timeout_index) i = m_last_exported_on_timeout_index; i < m_last_exported_on_timeout_index + m_new_flow_insert_index; i++) { __builtin_prefetch(m_flow_table[i], 0, 1); } } +#ifdef WITH_CTT +void NHTFlowCache::set_ctt_config(const std::string& device_name, unsigned channel_id) +{ + m_ctt_device = device_name; + m_ctt_comp_index = channel_id/16; +} +#endif /* WITH_CTT */ + } diff --git a/storage/cache.hpp b/storage/cache.hpp index 57087f5b8..8c7637b8d 100644 --- a/storage/cache.hpp +++ b/storage/cache.hpp @@ -32,152 +32,21 @@ #ifndef IPXP_STORAGE_CACHE_HPP #define IPXP_STORAGE_CACHE_HPP +#include #include - #include -#include +#include #include -#include #include - +#include #include "fragmentationCache/fragmentationCache.hpp" +#include "cacheOptParser.hpp" +#include "flowKey.tpp" +#include "flowRecord.hpp" +#include "cttController.hpp" namespace ipxp { -struct __attribute__((packed)) flow_key_v4_t { - uint16_t src_port; - uint16_t dst_port; - uint8_t proto; - uint8_t ip_version; - uint32_t src_ip; - uint32_t dst_ip; - uint16_t vlan_id; -}; - -struct __attribute__((packed)) flow_key_v6_t { - uint16_t src_port; - uint16_t dst_port; - uint8_t proto; - uint8_t ip_version; - uint8_t src_ip[16]; - uint8_t dst_ip[16]; - uint16_t vlan_id; -}; - -#define MAX_KEY_LENGTH (max(sizeof(flow_key_v4_t), sizeof(flow_key_v6_t))) - -#ifdef IPXP_FLOW_CACHE_SIZE -static const uint32_t DEFAULT_FLOW_CACHE_SIZE = IPXP_FLOW_CACHE_SIZE; -#else -static const uint32_t DEFAULT_FLOW_CACHE_SIZE = 17; 
// 131072 records total -#endif /* IPXP_FLOW_CACHE_SIZE */ - -#ifdef IPXP_FLOW_LINE_SIZE -static const uint32_t DEFAULT_FLOW_LINE_SIZE = IPXP_FLOW_LINE_SIZE; -#else -static const uint32_t DEFAULT_FLOW_LINE_SIZE = 4; // 16 records per line -#endif /* IPXP_FLOW_LINE_SIZE */ - -static const uint32_t DEFAULT_INACTIVE_TIMEOUT = 30; -static const uint32_t DEFAULT_ACTIVE_TIMEOUT = 300; - -static_assert(std::is_unsigned(), "Static checks of default cache sizes won't properly work without unsigned type."); -static_assert(bitcount(-1) > DEFAULT_FLOW_CACHE_SIZE, "Flow cache size is too big to fit in variable!"); -static_assert(bitcount(-1) > DEFAULT_FLOW_LINE_SIZE, "Flow cache line size is too big to fit in variable!"); - -static_assert(DEFAULT_FLOW_LINE_SIZE >= 1, "Flow cache line size must be at least 1!"); -static_assert(DEFAULT_FLOW_CACHE_SIZE >= DEFAULT_FLOW_LINE_SIZE, "Flow cache size must be at least cache line size!"); - -class CacheOptParser : public OptionsParser -{ -public: - uint32_t m_cache_size; - uint32_t m_line_size; - uint32_t m_active; - uint32_t m_inactive; - bool m_split_biflow; - bool m_enable_fragmentation_cache; - std::size_t m_frag_cache_size; - time_t m_frag_cache_timeout; - - CacheOptParser() : OptionsParser("cache", "Storage plugin implemented as a hash table"), - m_cache_size(1 << DEFAULT_FLOW_CACHE_SIZE), m_line_size(1 << DEFAULT_FLOW_LINE_SIZE), - m_active(DEFAULT_ACTIVE_TIMEOUT), m_inactive(DEFAULT_INACTIVE_TIMEOUT), m_split_biflow(false), - m_enable_fragmentation_cache(true), m_frag_cache_size(10007), // Prime for better distribution in hash table - m_frag_cache_timeout(3) - { - register_option("s", "size", "EXPONENT", "Cache size exponent to the power of two", - [this](const char *arg){try {unsigned exp = str2num(arg); - if (exp < 4 || exp > 30) { - throw PluginError("Flow cache size must be between 4 and 30"); - } - m_cache_size = static_cast(1) << exp; - } catch(std::invalid_argument &e) {return false;} return true;}, - 
OptionFlags::RequiredArgument); - register_option("l", "line", "EXPONENT", "Cache line size exponent to the power of two", - [this](const char *arg){try {m_line_size = static_cast(1) << str2num(arg); - if (m_line_size < 1) { - throw PluginError("Flow cache line size must be at least 1"); - } - } catch(std::invalid_argument &e) {return false;} return true;}, - OptionFlags::RequiredArgument); - register_option("a", "active", "TIME", "Active timeout in seconds", - [this](const char *arg){try {m_active = str2num(arg);} catch(std::invalid_argument &e) {return false;} return true;}, - OptionFlags::RequiredArgument); - register_option("i", "inactive", "TIME", "Inactive timeout in seconds", - [this](const char *arg){try {m_inactive = str2num(arg);} catch(std::invalid_argument &e) {return false;} return true;}, - OptionFlags::RequiredArgument); - register_option("S", "split", "", "Split biflows into uniflows", - [this](const char *arg){ m_split_biflow = true; return true;}, OptionFlags::NoArgument); - register_option("fe", "frag-enable", "true|false", "Enable/disable fragmentation cache. Enabled (true) by default.", - [this](const char *arg){ - if (strcmp(arg, "true") == 0) { - m_enable_fragmentation_cache = true; - } else if (strcmp(arg, "false") == 0) { - m_enable_fragmentation_cache = false; - } else { - return false; - } - return true; - }, OptionFlags::RequiredArgument); - register_option("fs", "frag-size", "size", "Size of fragmentation cache, must be at least 1. Default value is 10007.", [this](const char *arg) { - try { - m_frag_cache_size = str2num(arg); - } catch(std::invalid_argument &e) { - return false; - } - return m_frag_cache_size > 0; - }); - register_option("ft", "frag-timeout", "TIME", "Timeout of fragments in fragmentation cache in seconds. 
Default value is 3.", [this](const char *arg) { - try { - m_frag_cache_timeout = str2num(arg); - } catch(std::invalid_argument &e) { - return false; - } - return true; - }); - } -}; - -class alignas(64) FlowRecord -{ - uint64_t m_hash; - -public: - Flow m_flow; - - FlowRecord(); - ~FlowRecord(); - - void erase(); - void reuse(); - - inline bool is_empty() const; - inline bool belongs(uint64_t pkt_hash) const; - void create(const Packet &pkt, uint64_t pkt_hash); - void update(const Packet &pkt, bool src); -}; - struct FlowEndReasonStats { uint64_t active_timeout; uint64_t inactive_timeout; @@ -195,19 +64,32 @@ struct FlowRecordStats { uint64_t packets_count_51_plus; }; +struct FlowCacheStats{ + uint64_t empty; + uint64_t not_empty; + uint64_t hits; + uint64_t exported{0}; + uint64_t flushed; + uint64_t lookups{0}; + uint64_t lookups2{0}; + uint64_t flows_in_cache; + uint64_t total_exported; + uint64_t ctt_offloaded{0}; +}; + class NHTFlowCache : TelemetryUtils, public StoragePlugin { public: NHTFlowCache(); - ~NHTFlowCache(); - void init(const char *params); - void close(); - void set_queue(ipx_ring_t *queue); - OptionsParser *get_parser() const { return new CacheOptParser(); } - std::string get_name() const { return "cache"; } + ~NHTFlowCache() override; + void init(const char* params) override; + void close() override; + void set_queue(ipx_ring_t* queue) override; + OptionsParser * get_parser() const override; + std::string get_name() const noexcept override; - int put_pkt(Packet &pkt); - void export_expired(time_t ts); + int put_pkt(Packet& pkt) override; + void export_expired(time_t now) override; /** * @brief Set and configure the telemetry directory where cache stats will be stored. 
@@ -218,51 +100,61 @@ class NHTFlowCache : TelemetryUtils, public StoragePlugin uint32_t m_cache_size; uint32_t m_line_size; uint32_t m_line_mask; - uint32_t m_line_new_idx; - uint32_t m_qsize; - uint32_t m_qidx; - uint32_t m_timeout_idx; - uint64_t m_flows_in_cache = 0; - uint64_t m_total_exported = 0; -#ifdef FLOW_CACHE_STATS - uint64_t m_empty; - uint64_t m_not_empty; - uint64_t m_hits; - uint64_t m_expired; - uint64_t m_flushed; - uint64_t m_lookups; - uint64_t m_lookups2; -#endif /* FLOW_CACHE_STATS */ + uint32_t m_new_flow_insert_index; + uint32_t m_queue_size; + uint32_t m_queue_index{0}; + uint32_t m_last_exported_on_timeout_index{0}; + uint32_t m_active; uint32_t m_inactive; bool m_split_biflow; bool m_enable_fragmentation_cache; - uint8_t m_keylen; - char m_key[MAX_KEY_LENGTH]; - char m_key_inv[MAX_KEY_LENGTH]; - FlowRecord **m_flow_table; - FlowRecord *m_flow_records; + std::variant m_key; + std::variant m_key_reversed; + std::vector m_flow_table; + std::vector m_flows; FragmentationCache m_fragmentation_cache; FlowEndReasonStats m_flow_end_reason_stats = {}; FlowRecordStats m_flow_record_stats = {}; + FlowCacheStats m_cache_stats = {}; +#ifdef WITH_CTT + void set_ctt_config(const std::string& device_name, unsigned channel_id) override; + std::string m_ctt_device; + unsigned m_ctt_comp_index; + CttController m_ctt_controller; + //std::unordered_map m_hashes_in_ctt; + //size_t m_ctt_hash_collision{0}; +#endif /* WITH_CTT */ void try_to_fill_ports_to_fragmented_packet(Packet& packet); - void flush(Packet &pkt, size_t flow_index, int ret, bool source_flow); - bool create_hash_key(Packet &pkt); - void export_flow(size_t index); - static uint8_t get_export_reason(Flow &flow); + void flush(Packet &pkt, size_t flow_index, int return_flags); + bool create_hash_key(const Packet &packet); + static uint8_t get_export_reason(const Flow &flow); void finish(); - + void allocate_table(); void update_flow_end_reason_stats(uint8_t reason); void 
update_flow_record_stats(uint64_t packets_count); telemetry::Content get_cache_telemetry(); void prefetch_export_expired() const; - -#ifdef FLOW_CACHE_STATS - void print_report(); -#endif /* FLOW_CACHE_STATS */ + void get_parser_options(CacheOptParser& parser) noexcept; + void push_to_export_queue(size_t flow_index) noexcept; + std::tuple, std::optional, bool> find_flow_index(const Packet& packet) noexcept; + bool try_to_export_on_inactive_timeout(size_t flow_index, const timeval& now) noexcept; + bool try_to_export_on_active_timeout(size_t flow_index, const timeval& now) noexcept; + void export_flow(size_t flow_index, int reason); + void export_flow(size_t flow_index); + int process_flow(Packet& packet, size_t flow_index, bool flow_is_waiting_for_export) noexcept; + bool try_to_export_delayed_flow(const Packet& packet, size_t flow_index) noexcept; + void create_record(const Packet& packet, size_t flow_index, size_t hash_value) noexcept; + bool try_to_export(size_t flow_index, bool call_pre_export, const timeval& now, int reason) noexcept; + bool try_to_export(size_t flow_index, bool call_pre_export, const timeval& now) noexcept; + void print_report() const; + void send_export_request_to_ctt(size_t ctt_flow_hash) noexcept; + void export_expired(const timeval& now); + void try_to_add_flow_to_ctt(size_t flow_index) noexcept; + bool needs_to_be_offloaded(size_t flow_index) const noexcept; }; } -#endif /* IPXP_STORAGE_CACHE_HPP */ +#endif /* IPXP_STORAGE_CACHE_HPP */ \ No newline at end of file diff --git a/storage/cacheOptParser.cpp b/storage/cacheOptParser.cpp new file mode 100644 index 000000000..2fadb0b15 --- /dev/null +++ b/storage/cacheOptParser.cpp @@ -0,0 +1,114 @@ +/** +* \file + * \author Damir Zainullin + * \brief CacheOptParser implementation. 
+ */ +/* + * Copyright (C) 2023 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. + */ + +#include "cacheOptParser.hpp" + +#include +#include +#include + +namespace ipxp { + +#ifdef IPXP_FLOW_CACHE_SIZE +static const uint32_t DEFAULT_FLOW_CACHE_SIZE = IPXP_FLOW_CACHE_SIZE; +#else +static const uint32_t DEFAULT_FLOW_CACHE_SIZE = 17; // 131072 records total +#endif /* IPXP_FLOW_CACHE_SIZE */ + +#ifdef IPXP_FLOW_LINE_SIZE +static const uint32_t DEFAULT_FLOW_LINE_SIZE = IPXP_FLOW_LINE_SIZE; +#else +static const uint32_t DEFAULT_FLOW_LINE_SIZE = 4; // 16 records per line +#endif /* IPXP_FLOW_LINE_SIZE */ + +static const uint32_t DEFAULT_INACTIVE_TIMEOUT = 30; +static const uint32_t DEFAULT_ACTIVE_TIMEOUT = 300; + +static_assert(std::is_unsigned(), "Static checks of default cache sizes won't properly work without unsigned type."); +static_assert(bitcount(-1) > DEFAULT_FLOW_CACHE_SIZE, "Flow cache size is too big to fit in variable!"); +static_assert(bitcount(-1) > DEFAULT_FLOW_LINE_SIZE, "Flow cache line size is too big to fit in variable!"); + +static_assert(DEFAULT_FLOW_LINE_SIZE >= 1, "Flow cache line size must be at least 1!"); +static_assert(DEFAULT_FLOW_CACHE_SIZE >= DEFAULT_FLOW_LINE_SIZE, "Flow cache size must be at least cache line size!"); + +CacheOptParser::CacheOptParser() : 
OptionsParser("cache", "Storage plugin implemented as a hash table"), + m_cache_size(1 << DEFAULT_FLOW_CACHE_SIZE), m_line_size(1 << DEFAULT_FLOW_LINE_SIZE), + m_active(DEFAULT_ACTIVE_TIMEOUT), m_inactive(DEFAULT_INACTIVE_TIMEOUT), m_split_biflow(false), + m_enable_fragmentation_cache(true), m_frag_cache_size(10007), // Prime for better distribution in hash table + m_frag_cache_timeout(3) + { + register_option("s", "size", "EXPONENT", "Cache size exponent to the power of two", + [this](const char *arg){try {unsigned exp = str2num(arg); + if (exp < 4 || exp > 30) { + throw PluginError("Flow cache size must be between 4 and 30"); + } + m_cache_size = static_cast(1) << exp; + } catch(std::invalid_argument &e) {return false;} return true;}, + OptionFlags::RequiredArgument); + register_option("l", "line", "EXPONENT", "Cache line size exponent to the power of two", + [this](const char *arg){try {m_line_size = static_cast(1) << str2num(arg); + if (m_line_size < 1) { + throw PluginError("Flow cache line size must be at least 1"); + } + } catch(std::invalid_argument &e) {return false;} return true;}, + OptionFlags::RequiredArgument); + register_option("a", "active", "TIME", "Active timeout in seconds", + [this](const char *arg){try {m_active = str2num(arg);} catch(std::invalid_argument &e) {return false;} return true;}, + OptionFlags::RequiredArgument); + register_option("i", "inactive", "TIME", "Inactive timeout in seconds", + [this](const char *arg){try {m_inactive = str2num(arg);} catch(std::invalid_argument &e) {return false;} return true;}, + OptionFlags::RequiredArgument); + register_option("S", "split", "", "Split biflows into uniflows", + [this](const char *arg){ m_split_biflow = true; return true;}, OptionFlags::NoArgument); + register_option("fe", "frag-enable", "true|false", "Enable/disable fragmentation cache. 
Enabled (true) by default.", + [this](const char *arg){ + if (strcmp(arg, "true") == 0) { + m_enable_fragmentation_cache = true; + } else if (strcmp(arg, "false") == 0) { + m_enable_fragmentation_cache = false; + } else { + return false; + } + return true; + }, OptionFlags::RequiredArgument); + register_option("fs", "frag-size", "size", "Size of fragmentation cache, must be at least 1. Default value is 10007.", [this](const char *arg) { + try { + m_frag_cache_size = str2num(arg); + } catch(std::invalid_argument &e) { + return false; + } + return m_frag_cache_size > 0; + }); + register_option("ft", "frag-timeout", "TIME", "Timeout of fragments in fragmentation cache in seconds. Default value is 3.", [this](const char *arg) { + try { + m_frag_cache_timeout = str2num(arg); + } catch(std::invalid_argument &e) { + return false; + } + return true; + }); + } + + +} // ipxp \ No newline at end of file diff --git a/storage/cacheOptParser.hpp b/storage/cacheOptParser.hpp new file mode 100644 index 000000000..15b5d66ef --- /dev/null +++ b/storage/cacheOptParser.hpp @@ -0,0 +1,49 @@ +/** +* \file + * \author Damir Zainullin + * \brief Contains the CacheOptParser class for parsing cache options. + */ +/* + * Copyright (C) 2023 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. 
+ */ + +#pragma once + +#include +#include +#include + +namespace ipxp { + +class CacheOptParser : public OptionsParser +{ +public: + uint32_t m_cache_size; + uint32_t m_line_size; + uint32_t m_active; + uint32_t m_inactive; + bool m_split_biflow; + bool m_enable_fragmentation_cache; + std::size_t m_frag_cache_size; + time_t m_frag_cache_timeout; + + CacheOptParser(); +}; + + +} // ipxp diff --git a/storage/cacheRowSpan.cpp b/storage/cacheRowSpan.cpp new file mode 100644 index 000000000..dd5d11d63 --- /dev/null +++ b/storage/cacheRowSpan.cpp @@ -0,0 +1,91 @@ +/** +* \file + * \author Damir Zainullin + * \brief CacheRowSpan implementation. + */ +/* + * Copyright (C) 2023 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. 
+ */ + +#include "cacheRowSpan.hpp" + +#include + +#include "fragmentationCache/timevalUtils.hpp" + +namespace ipxp { + +CacheRowSpan::CacheRowSpan(FlowRecord** begin, size_t count) noexcept + : m_begin(begin), m_count(count) +{ +} + +std::optional CacheRowSpan::find_by_hash(uint64_t hash) const noexcept +{ + auto it = std::find_if(m_begin, m_begin + m_count, [&](const FlowRecord* flow) { + return flow->belongs(hash); + }); + if (it == m_begin + m_count) { + return std::nullopt; + } + return it - m_begin; +} + +void CacheRowSpan::advance_flow_to(size_t from, size_t to) noexcept +{ + if (from < to) { + std::rotate(m_begin + from, m_begin + from + 1, m_begin + to + 1); + return; + } + std::rotate(m_begin + to, m_begin + from, m_begin + from + 1); +} + +void CacheRowSpan::advance_flow(size_t flow_index) noexcept +{ + advance_flow_to(flow_index, 0); +} + +std::optional CacheRowSpan::find_empty() const noexcept +{ + auto it = std::find_if(m_begin, m_begin + m_count, [](const FlowRecord* flow) { + return flow->is_empty(); + }); + if (it == m_begin + m_count) { + return std::nullopt; + } + return it - m_begin; +} + +#ifdef WITH_CTT +size_t CacheRowSpan::find_victim(const timeval& now) const noexcept +{ + FlowRecord* const* victim = m_begin + m_count - 1; + auto it = std::find_if(m_begin, m_begin + m_count, [&](FlowRecord* const& flow) { + if (!flow->is_in_ctt) { + victim = &flow; + } + return flow->is_waiting_for_export && now > flow->export_time; + }); + if (it == m_begin + m_count) { + return victim - m_begin; + } + return it - m_begin; +} +#endif /* WITH_CTT */ + +} // ipxp \ No newline at end of file diff --git a/storage/cacheRowSpan.hpp b/storage/cacheRowSpan.hpp new file mode 100644 index 000000000..4b1ab5ee8 --- /dev/null +++ b/storage/cacheRowSpan.hpp @@ -0,0 +1,82 @@ +/** +* \file + * \author Damir Zainullin + * \brief CacheRowSpan declaration. 
+ */ +/* + * Copyright (C) 2023 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. + */ + +#pragma once + +#include +#include +#include "flowRecord.hpp" + +namespace ipxp { +/** + * \brief Class representing a non-owning view of a row span in a cache. + */ +class CacheRowSpan { +public: + /** + * \brief Construct a new CacheRowSpan object. + * \param begin Pointer to the first element in the row. + * \param count Number of elements in the row. + */ + CacheRowSpan(FlowRecord** begin, size_t count) noexcept; + + /** + * \brief Find a flow record by hash. + * \param hash Hash value to search for. + * \return Index of the flow record relative to row begin if found, std::nullopt otherwise. + */ + std::optional find_by_hash(uint64_t hash) const noexcept; + /** + * \brief Move a flow record to the beginning of the row. + * \param flow_index Index of the flow record to move. + */ + void advance_flow(size_t flow_index) noexcept; + + /** + * \brief Move a flow record to a specific position in the row. + * \param from Index of the flow record to move. + * \param to Index of the position to move the flow record to. + */ + void advance_flow_to(size_t from, size_t to) noexcept; + + /** + * \brief Find an empty flow record in the row. 
+ * \return Index of the empty flow record if found, std::nullopt otherwise. + */ + std::optional find_empty() const noexcept; +#ifdef WITH_CTT + /** + * \brief Find a flow record to be evicted. + * \param now Current time. + * \return Index of flow from ctt which has delayed export timeout expired if found, + * last record which is not in ctt, or last record otherwise + */ + size_t find_victim(const timeval& now) const noexcept; +#endif /* WITH_CTT */ +private: + FlowRecord** m_begin; + size_t m_count; +}; + +} // ipxp diff --git a/storage/cttController.cpp b/storage/cttController.cpp new file mode 100644 index 000000000..6c90cf0d3 --- /dev/null +++ b/storage/cttController.cpp @@ -0,0 +1,115 @@ +/** +* \file + * \author Damir Zainullin + * \brief CttController implementation. + */ +/* + * Copyright (C) 2023 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. 
+ */ + +#include "cttController.hpp" + +#ifdef WITH_CTT + +namespace ipxp { + +void CttController::init(const std::string& nfb_dev, unsigned ctt_comp_index) { + m_commander = std::make_unique(ctt::NfbParamsFast{nfb_dev, ctt_comp_index}); + try { + // Get UserInfo to determine key, state, and state_mask sizes + ctt::UserInfo user_info = m_commander->get_user_info(); + key_size_bytes = (user_info.key_bit_width + 7) / 8; + state_size_bytes = (user_info.state_bit_width + 7) / 8; + state_mask_size_bytes = (user_info.state_mask_bit_width + 7) / 8; + + // Enable the CTT + std::future enable_future = m_commander->enable(true); + enable_future.wait(); + } + catch (const std::exception& e) { + throw; + } +} + +void CttController::create_record(uint64_t flow_hash_ctt, const struct timeval& ts) +{ + try { + std::vector key = assemble_key(flow_hash_ctt); + std::vector state = assemble_state( + OffloadMode::PACKET_OFFLOAD, + MetaType::FULL, + ts); + m_commander->write_record(std::move(key), std::move(state)); + } + catch (const std::exception& e) { + throw; + } +} + +void CttController::export_record(uint64_t flow_hash_ctt) +{ + try { + std::vector key = assemble_key(flow_hash_ctt); + m_commander->export_and_delete_record(std::move(key)); + } + catch (const std::exception& e) { + throw; + } +} + +std::vector CttController::assemble_key(uint64_t flow_hash_ctt) +{ + std::vector key(key_size_bytes, std::byte(0)); + for (size_t i = 0; i < sizeof(flow_hash_ctt) && i < key_size_bytes; ++i) { + key[i] = static_cast((flow_hash_ctt >> (8 * i)) & 0xFF); + } + return key; +} + +std::vector CttController::assemble_state( + OffloadMode offload_mode, MetaType meta_type, const struct timeval& ts) +{ + std::vector state(state_size_bytes, std::byte(0)); + std::vector state_mask(state_mask_size_bytes, std::byte(0)); + + state[0] = static_cast(offload_mode); + state[1] = static_cast(meta_type); + + // timestamp in sec/ns format, 32+32 bits - 64 bits in total + for (size_t i = 0; i < 
sizeof(ts.tv_sec) && i < 4; ++i) { + state[2 + i] = static_cast((ts.tv_sec >> (8 * i)) & 0xFF); + } + for (size_t i = 0; i < sizeof(ts.tv_usec) && i < 4; ++i) { + state[6 + i] = static_cast((ts.tv_usec >> (8 * i)) & 0xFF); + } + return state; +} + +CttController::~CttController() noexcept +{ + if (!m_commander) { + return; + } + std::future enable_future = m_commander->enable(false); + enable_future.wait(); + m_commander.reset(); +} + +} // ipxp + +#endif /* WITH_CTT */ diff --git a/storage/cttController.hpp b/storage/cttController.hpp new file mode 100644 index 000000000..99b0a48dc --- /dev/null +++ b/storage/cttController.hpp @@ -0,0 +1,107 @@ +/** +* \file + * \author Damir Zainullin + * \brief CttController declaration. + */ +/* + * Copyright (C) 2023 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. + */ + +#pragma once + +#include +#ifdef WITH_CTT +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ipxp { + +class CttController { +public: + enum class OffloadMode : uint8_t { + NO_OFFLOAD = 0x0, + PACKET_OFFLOAD = 0x1, + META_EXPORT = 0x2, + PACKET_OFFLOAD_WITH_EXPORT = 0x3 + }; + enum class MetaType : uint8_t { + FULL = 0x0, + HALF = 0x1, + TS_ONLY = 0x2, + NO_META = 0x3 + }; + /** + * @brief init the CTT. 
+ * + * @param nfb_dev The NFB device file (e.g., "/dev/nfb0"). + * @param ctt_comp_index The index of the CTT component. + */ + void init(const std::string& nfb_dev, unsigned ctt_comp_index); + + /** + * @brief Command: mark a flow for offload. + * + * @param flow_hash_ctt The flow hash to be offloaded. + */ + void create_record(uint64_t flow_hash_ctt, const struct timeval& timestamp_first); + + /** + * @brief Command: export a flow from the CTT. + * + * @param flow_hash_ctt The flow hash to be exported. + */ + void export_record(uint64_t flow_hash_ctt); + + ~CttController() noexcept; + +private: + std::unique_ptr m_commander; + size_t key_size_bytes; + size_t state_size_bytes; + size_t state_mask_size_bytes; + + /** + * @brief Assembles the state vector from the given values. + * + * @param offload_mode The offload mode. + * @param meta_type The metadata type. + * @param timestamp_first The first timestamp of the flow. + * @return A byte vector representing the assembled state vector. + */ + std::vector assemble_state( + OffloadMode offload_mode, MetaType meta_type, + const struct timeval& timestamp_first); + + /** + * @brief Assembles the key vector from the given flow hash. + * + * @param flow_hash_ctt The flow hash. + * @return A byte vector representing the assembled key vector. + */ + std::vector assemble_key(uint64_t flow_hash_ctt); +}; + +} // ipxp + +#endif /* WITH_CTT */ diff --git a/storage/flowKey.tpp b/storage/flowKey.tpp new file mode 100644 index 000000000..3f88cb8b2 --- /dev/null +++ b/storage/flowKey.tpp @@ -0,0 +1,105 @@ +/** +* \file + * \author Damir Zainullin + * \brief FlowKey structure declaration. + */ +/* + * Copyright (C) 2023 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. + */ + +#pragma once + +#include +#include +#include + +namespace ipxp { + +template +struct FlowKey { + uint16_t src_port; + uint16_t dst_port; + uint8_t proto; + uint8_t ip_version; + std::array src_ip; + std::array dst_ip; + uint16_t vlan_id; +protected: + void save_direct(const Packet& packet) noexcept + { + src_port = packet.src_port; + dst_port = packet.dst_port; + proto = packet.ip_proto; + ip_version = packet.ip_version; + vlan_id = packet.vlan_id; + } + + void save_reversed(const Packet& packet) noexcept + { + save_direct(packet); + src_port = packet.dst_port; + dst_port = packet.src_port; + } + +} __attribute__((packed)); + +struct FlowKeyv4 : FlowKey<4> { + + static FlowKeyv4 save_direct(const Packet& packet) noexcept + { + FlowKeyv4 res; + res.FlowKey::save_direct(packet); + std::memcpy(res.src_ip.data(), &packet.src_ip.v4, 4); + std::memcpy(res.dst_ip.data(), &packet.dst_ip.v4, 4); + return res; + } + + static FlowKeyv4 save_reversed(const Packet& packet) noexcept + { + FlowKeyv4 res; + res.FlowKey::save_reversed(packet); + std::memcpy(res.src_ip.data(), &packet.dst_ip.v4, 4); + std::memcpy(res.dst_ip.data(), &packet.src_ip.v4, 4); + return res; + } + +}; + +struct FlowKeyv6 : FlowKey<16> { + + static FlowKeyv6 save_direct(const Packet& packet) noexcept + { + FlowKeyv6 res; + res.FlowKey::save_direct(packet); + std::memcpy(res.src_ip.data(), &packet.src_ip.v4, 16); + std::memcpy(res.dst_ip.data(), &packet.dst_ip.v4, 
16); + return res; + } + + static FlowKeyv6 save_reversed(const Packet& packet) noexcept + { + FlowKeyv6 res; + res.FlowKey::save_reversed(packet); + std::memcpy(res.src_ip.data(), &packet.dst_ip.v4, 16); + std::memcpy(res.dst_ip.data(), &packet.src_ip.v4, 16); + return res; + } + +}; + +} // namespace ipxp \ No newline at end of file diff --git a/storage/flowRecord.cpp b/storage/flowRecord.cpp new file mode 100644 index 000000000..12026a259 --- /dev/null +++ b/storage/flowRecord.cpp @@ -0,0 +1,147 @@ +/** +* \file + * \author Damir Zainullin + * \brief FlowRecord implementation. + */ +/* + * Copyright (C) 2023 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. 
+ */ + +#include "flowRecord.hpp" + +#include +#include +#include + +namespace ipxp { + +FlowRecord::FlowRecord() +{ + erase(); +}; + +FlowRecord::~FlowRecord() +{ + erase(); +}; + +void FlowRecord::erase() +{ + m_flow.remove_extensions(); + m_hash = 0; + memset(&m_flow.time_first, 0, sizeof(m_flow.time_first)); + memset(&m_flow.time_last, 0, sizeof(m_flow.time_last)); + m_flow.ip_version = 0; + m_flow.ip_proto = 0; + memset(&m_flow.src_ip, 0, sizeof(m_flow.src_ip)); + memset(&m_flow.dst_ip, 0, sizeof(m_flow.dst_ip)); + m_flow.src_port = 0; + m_flow.dst_port = 0; + m_flow.src_packets = 0; + m_flow.dst_packets = 0; + m_flow.src_bytes = 0; + m_flow.dst_bytes = 0; + m_flow.src_tcp_flags = 0; + m_flow.dst_tcp_flags = 0; +#ifdef WITH_CTT + is_waiting_for_export = false; + is_in_ctt = false; +#endif /* WITH_CTT */ +} +void FlowRecord::reuse() +{ + m_flow.remove_extensions(); + m_flow.time_first = m_flow.time_last; + m_flow.src_packets = 0; + m_flow.dst_packets = 0; + m_flow.src_bytes = 0; + m_flow.dst_bytes = 0; + m_flow.src_tcp_flags = 0; + m_flow.dst_tcp_flags = 0; +#ifdef WITH_CTT + is_waiting_for_export = false; + is_in_ctt = false; +#endif /* WITH_CTT */ +} + +void FlowRecord::create(const Packet &pkt, uint64_t hash) +{ + m_flow.src_packets = 1; + + m_hash = hash; + + m_flow.time_first = pkt.ts; + m_flow.time_last = pkt.ts; + m_flow.flow_hash = hash; + + memcpy(m_flow.src_mac, pkt.src_mac, 6); + memcpy(m_flow.dst_mac, pkt.dst_mac, 6); + + if (pkt.ip_version == IP::v4) { + m_flow.ip_version = pkt.ip_version; + m_flow.ip_proto = pkt.ip_proto; + m_flow.src_ip.v4 = pkt.src_ip.v4; + m_flow.dst_ip.v4 = pkt.dst_ip.v4; + m_flow.src_bytes = pkt.ip_len; + } else if (pkt.ip_version == IP::v6) { + m_flow.ip_version = pkt.ip_version; + m_flow.ip_proto = pkt.ip_proto; + memcpy(m_flow.src_ip.v6, pkt.src_ip.v6, 16); + memcpy(m_flow.dst_ip.v6, pkt.dst_ip.v6, 16); + m_flow.src_bytes = pkt.ip_len; + } + + if (pkt.ip_proto == IPPROTO_TCP) { + m_flow.src_port = pkt.src_port; + 
m_flow.dst_port = pkt.dst_port; + m_flow.src_tcp_flags = pkt.tcp_flags; + } else if (pkt.ip_proto == IPPROTO_UDP) { + m_flow.src_port = pkt.src_port; + m_flow.dst_port = pkt.dst_port; + } else if (pkt.ip_proto == IPPROTO_ICMP || + pkt.ip_proto == IPPROTO_ICMPV6) { + m_flow.src_port = pkt.src_port; + m_flow.dst_port = pkt.dst_port; + } +#ifdef WITH_CTT + is_waiting_for_export = false; + is_in_ctt = false; +#endif /* WITH_CTT */ +} + +void FlowRecord::update(const Packet &pkt) +{ + m_flow.time_last = pkt.ts; + if (pkt.source_pkt) { + m_flow.src_packets++; + m_flow.src_bytes += pkt.ip_len; + + if (pkt.ip_proto == IPPROTO_TCP) { + m_flow.src_tcp_flags |= pkt.tcp_flags; + } + } else { + m_flow.dst_packets++; + m_flow.dst_bytes += pkt.ip_len; + + if (pkt.ip_proto == IPPROTO_TCP) { + m_flow.dst_tcp_flags |= pkt.tcp_flags; + } + } +} + +} // ipxp \ No newline at end of file diff --git a/storage/flowRecord.hpp b/storage/flowRecord.hpp new file mode 100644 index 000000000..ed31d990c --- /dev/null +++ b/storage/flowRecord.hpp @@ -0,0 +1,67 @@ +/** +* \file + * \author Damir Zainullin + * \brief FlowRecord declaration. + */ +/* + * Copyright (C) 2023 CESNET + * + * LICENSE TERMS + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the Company nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. 
+ */ + +#pragma once + +#include +#include +#include +#include + +namespace ipxp { + +class alignas(64) FlowRecord +{ + uint64_t m_hash; +public: + Flow m_flow; +#ifdef WITH_CTT + bool is_in_ctt; /**< Flow is offloaded by CTT if set. */ + bool is_waiting_for_export; /**< Export request of flow was sent to ctt, + but still has not been processed in ctt. */ + timeval export_time; /**< Time point when we sure that the export request has already been processed by ctt, + and flow is not in ctt anymore. */ +#endif /* WITH_CTT */ + + FlowRecord(); + ~FlowRecord(); + + void erase(); + void reuse(); + + __attribute__((always_inline)) bool is_empty() const noexcept + { + return m_hash == 0; + } + + __attribute__((always_inline)) bool belongs(uint64_t hash) const noexcept + { + return hash == m_hash; + } + + void create(const Packet &pkt, uint64_t pkt_hash); + void update(const Packet &pkt); +}; + +} // ipxp diff --git a/storage/fragmentationCache/timevalUtils.hpp b/storage/fragmentationCache/timevalUtils.hpp index fdd66f6e0..590f91900 100644 --- a/storage/fragmentationCache/timevalUtils.hpp +++ b/storage/fragmentationCache/timevalUtils.hpp @@ -28,7 +28,7 @@ namespace ipxp { -struct timeval operator+(const struct timeval& a, const struct timeval& b) noexcept +inline struct timeval operator+(const struct timeval& a, const struct timeval& b) noexcept { constexpr time_t USEC_IN_SEC = 1000000; @@ -42,7 +42,7 @@ struct timeval operator+(const struct timeval& a, const struct timeval& b) noexc return result; } -bool operator>(const struct timeval& a, const struct timeval& b) noexcept +inline bool operator>(const struct timeval& a, const struct timeval& b) noexcept { if (a.tv_sec == b.tv_sec) return a.tv_usec > b.tv_usec;