go-intelowl is a client library/SDK that allows developers to easily automate and integrate IntelOwl with their own set of tools!
Use `go get` to retrieve the SDK and add it to your GOPATH workspace, or to your project's Go module dependencies.
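For example (the module path below is taken from the import used in the examples that follow):

```bash
go get github.com/intelowlproject/go-intelowl
```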
This library was built with ease of use in mind! Here are some quick examples to get you started. If you need more examples, you can go to the examples directory.
To start using the go-intelowl library, you first need to import it:
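The import statement, matching the full example further below:

```go
import "github.com/intelowlproject/go-intelowl/gointelowl"
```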
Construct a new `IntelOwlClient`, then use the various services to easily access different parts of IntelOwl's REST API. Here's an example of getting all jobs:
```go
clientOptions := gointelowl.IntelOwlClientOptions{
	Url:   "your-cool-URL-goes-here",
	Token: "your-super-secret-token-goes-here",
	// This is optional
	Certificate: "your-optional-certificate-goes-here",
}

intelowl := gointelowl.NewIntelOwlClient(
	&clientOptions,
	nil,
)

ctx := context.Background()

// returns *[]Jobs or an IntelOwlError!
jobs, err := intelowl.JobService.List(ctx)
```
For easy configuration and setup, we opted for options structs, where we can customize the client API or service endpoint to our liking! For more information go here. Here's a quick example!
```go
// ...Making the client and context!

tagOptions := gointelowl.TagParams{
	Label: "NEW TAG",
	Color: "#ffb703",
}

createdTag, err := intelowl.TagService.Create(ctx, tagOptions)
if err != nil {
	fmt.Println(err)
} else {
	fmt.Println(createdTag)
}
```
The examples directory contains a couple of clear examples, of which one is partially listed here as well:
```go
package main

import (
	"context"
	"fmt"

	"github.com/intelowlproject/go-intelowl/gointelowl"
)

func main() {
	intelowlOptions := gointelowl.IntelOwlClientOptions{
		Url:         "your-cool-url-goes-here",
		Token:       "your-super-secret-token-goes-here",
		Certificate: "your-optional-certificate-goes-here",
	}

	client := gointelowl.NewIntelOwlClient(
		&intelowlOptions,
		nil,
	)

	ctx := context.Background()

	// Get User details!
	user, err := client.UserService.Access(ctx)
	if err != nil {
		fmt.Println("err")
		fmt.Println(err)
	} else {
		fmt.Println("USER Details")
		fmt.Println(*user)
	}
}
```
For complete usage of go-intelowl, see the full package docs.
If you want to follow the updates, discuss, contribute, or just chat, then please join our Slack channel; we'd love to hear your feedback!
Licensed under the GNU AFFERO GENERAL PUBLIC LICENSE.
You need a valid API key to interact with the IntelOwl server.
You can get an API key by doing the following:
API Access / Sessions
Keys should be created from the admin interface of IntelOwl: go to the Durin section (click on `Auth tokens`) and generate a key there.
enrichment
Handle enrichment requests for a specific observable (domain or IP address).
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `request` | | The incoming request object containing query parameters. | *required* |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | | A JSON response indicating whether the observable was found, and if so, the corresponding IOC. |
docs/Submodules/GreedyBear/api/views.py
feeds
Handle requests for IOC feeds with specific parameters and format the response accordingly.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `request` | | The incoming request object. | *required* |
| `feed_type` | `str` | Type of feed (e.g., log4j, cowrie, etc.). | *required* |
| `attack_type` | `str` | Type of attack (e.g., all, specific attack types). | *required* |
| `age` | `str` | Age of the data to filter (e.g., recent, persistent). | *required* |
| `format_` | `str` | Desired format of the response (e.g., json, csv, txt). | *required* |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | | The HTTP response with formatted IOC data. |
docs/Submodules/GreedyBear/api/views.py
feeds_pagination
Handle requests for paginated IOC feeds based on query parameters.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `request` | | The incoming request object. | *required* |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | | The paginated HTTP response with IOC data. |
docs/Submodules/GreedyBear/api/views.py
Statistics
+ Bases: ViewSet
A viewset for viewing and editing statistics related to feeds and enrichment data.
+Provides actions to retrieve statistics about the sources and downloads of feeds, +as well as statistics on enrichment data.
+docs/Submodules/GreedyBear/api/views.py
__aggregation_response_static_ioc(annotations)
+Helper method to generate IOC response based on annotations.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `annotations` | `dict` | Dictionary containing the annotations for the query. | *required* |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | `Response` | A JSON response containing the aggregated IOC data. |
docs/Submodules/GreedyBear/api/views.py
__aggregation_response_static_statistics(annotations)
+Helper method to generate statistics response based on annotations.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `annotations` | `dict` | Dictionary containing the annotations for the query. | *required* |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | `Response` | A JSON response containing the aggregated statistics. |
docs/Submodules/GreedyBear/api/views.py
__parse_range(request)
*staticmethod*
+Parse the range parameter from the request query string to determine the time range for the query.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `request` | | The incoming request object. | *required* |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| tuple | | A tuple containing the delta time and basis for the query range. |
docs/Submodules/GreedyBear/api/views.py
enrichment(request, pk=None)
+Retrieve enrichment statistics, including the number of sources and requests.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `request` | | The incoming request object. | *required* |
| `pk` | `str` | The type of statistics to retrieve (e.g., "sources", "requests"). | `None` |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | | A JSON response containing the requested statistics. |
docs/Submodules/GreedyBear/api/views.py
feeds(request, pk=None)
+Retrieve feed statistics, including the number of sources and downloads.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `request` | | The incoming request object. | *required* |
| `pk` | `str` | The type of statistics to retrieve (e.g., "sources", "downloads"). | `None` |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | | A JSON response containing the requested statistics. |
docs/Submodules/GreedyBear/api/views.py
feeds_types(request)
+Retrieve statistics for different types of feeds, including Log4j, Cowrie, +and general honeypots.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `request` | | The incoming request object. | *required* |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | | A JSON response containing the feed type statistics. |
docs/Submodules/GreedyBear/api/views.py
general_honeypot_list
Retrieve a list of all general honeypots, optionally filtering by active status.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `request` | | The incoming request object containing query parameters. | *required* |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | | A JSON response containing the list of general honeypots. |
docs/Submodules/GreedyBear/api/views.py
Please refer to IntelOwl Documentation for everything missing here.
GreedyBear welcomes contributors from anywhere and from any kind of education or skill level. We strive to create a community of developers that is welcoming and friendly.
+For this reason it is important to follow some easy rules based on a simple but important concept: Respect.
+Keeping to a consistent code style throughout the project makes it easier to contribute and collaborate. We make use of psf/black
and isort for code formatting and flake8
for style guides.
To start with the development setup, make sure you go through all the steps in Installation Guide and properly installed it.
+Please create a new branch based on the develop branch that contains the most recent changes. This is mandatory.
+git checkout -b myfeature develop
Then we strongly suggest configuring pre-commit to run the linters on every commit you perform:
```bash
# create virtualenv to host pre-commit installation
python3 -m venv venv
source venv/bin/activate
# from the project base directory
pip install pre-commit
pre-commit install -c .github/.pre-commit-config.yaml
```
Remember that whenever you make changes, you need to rebuild the docker image to see the reflected changes.
+If you made any changes to an existing model/serializer/view, please run the following command to generate a new version of the API schema and docs:
```bash
docker exec -it greedybear_uwsgi python manage.py spectacular --file docs/source/schema.yml && make html
```
To start the frontend in "develop" mode, you can execute the startup npm script within the folder frontend
:
```bash
cd frontend/
# Install
npm i
# Start
DANGEROUSLY_DISABLE_HOST_CHECK=true npm start
# See https://create-react-app.dev/docs/proxying-api-requests-in-development/#invalid-host-header-errors-after-configuring-proxy for why we use that flag in development mode
```
Most of the time you would need to test the changes you made together with the backend. In that case, you would need to run the backend locally too:
+ +The GreedyBear Frontend is tightly linked to the certego-ui
library. Most of the React components are imported from there. Because of this, it may happen that, during development, you would need to work on that library too.
To install the `certego-ui` library, please take a look at npm link and remember to start certego-ui without installing peer dependencies (to avoid conflicts with GreedyBear dependencies):
```bash
git clone https://github.com/certego/certego-ui.git
# change directory to the folder where you have cloned the library
cd certego-ui/
# install, without peer deps (to use packages of GreedyBear)
npm i --legacy-peer-deps
# create link to the project (this will globally install this package)
sudo npm link
# compile the library
npm start
```
Then, open another command line tab, create a link in the `frontend` folder to `certego-ui`, and re-install and re-start the frontend application (see previous section):
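A minimal sketch of those steps; the scoped package name `@certego/certego-ui` is an assumption, so check the name used in `frontend/package.json`:

```bash
cd frontend/
# point the frontend at the globally linked library (assumed package name)
npm link @certego/certego-ui
# re-install and restart (see previous section)
npm i
DANGEROUSLY_DISABLE_HOST_CHECK=true npm start
```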
This trick will allow you to see every change you make in `certego-ui` reflected directly in the running `frontend` application.
The certego-ui
application comes with an example project that showcases the components that you can re-use and import to other projects, like GreedyBear:
```bash
# To have the Example application working correctly, be sure to have installed `certego-ui` *without* the `--legacy-peer-deps` option and having it started in another command line
cd certego-ui/
npm i
npm start
# go to another tab
cd certego-ui/example/
npm i
npm start
```
Please create pull requests only for the branch develop. That code will be pushed to master only on a new release.
+Also remember to pull the most recent changes available in the develop branch before submitting your PR. If your PR has merge conflicts caused by this behavior, it won't be accepted.
You have to install `pre-commit` to have your code adjusted and fixed with the available linters:
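These are the same commands shown in the development setup above:

```bash
pip install pre-commit
pre-commit install -c .github/.pre-commit-config.yaml
```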
Once that is done, you won't have to think about linters anymore.
All the frontend tests must be run from the `frontend` folder.
The tests can contain log messages; you can suppress them with the environment variable `SUPPRESS_JEST_LOG=True`.
```bash
npm test -- -t '<describeString> <testString>'
# example
npm test -- -t "Login component User login"
```
If you get any errors, fix them.
Once you make sure that everything is working fine, please squash all of your commits into a single one and finally create a pull request.
For requirements, please refer to IntelOwl requirements, which are the same.
Note that GreedyBear needs a running ElasticSearch instance of a T-POT to function. In `docker/env_file`, set the variable `ELASTIC_ENDPOINT` to the URL of your T-POT's Elasticsearch.
If you don't have one, you can make the following changes to make GreedyBear spin up its own ElasticSearch instance. (Careful! This option requires enough RAM to run the additional containers; >=16GB is suggested.)

1. In `docker/env_file`, set the variable `ELASTIC_ENDPOINT` to `http://elasticsearch:9200`.
2. Append `docker/elasticsearch.yml` to the last defined `COMPOSE_FILE` variable, or uncomment the `# local development with elasticsearch container` block in the `.env` file.

Start by cloning the project:
```bash
# clone the Greedybear project repository
git clone https://github.com/honeynet/GreedyBear
cd GreedyBear/

# construct environment files from templates
cp .env_template .env
cd docker/
cp env_file_template env_file
cp env_file_postgres_template env_file_postgres
```
Now you can start by building the image using docker-compose and run the project.
```bash
# build the image locally
docker-compose build

# start the app
docker-compose up

# now the app is running on http://localhost:80

# shut down the application
docker-compose down
```
Note: To create a superuser, run the following:
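A standard Django management command does this; the container name matches the one used for the schema-generation command above:

```bash
docker exec -ti greedybear_uwsgi python manage.py createsuperuser
```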
+ +The app administrator can enable/disable the extraction of source IPs for specific honeypots from the Django Admin. +This is used for honeypots that are not specifically implemented to extract additional information (so not Log4Pot and Cowrie).
+In the env_file
, configure different variables as explained below.
Required variable to set:
+DEFAULT_FROM_EMAIL
: email address used for automated correspondence from the site manager (example: noreply@mydomain.com
)DEFAULT_EMAIL
: email address used for correspondence with users (example: info@mydomain.com
)EMAIL_HOST
: the host to use for sending email with SMTPEMAIL_HOST_USER
: username to use for the SMTP server defined in EMAIL_HOSTEMAIL_HOST_PASSWORD
: password to use for the SMTP server defined in EMAIL_HOST. This setting is used in conjunction with EMAIL_HOST_USER when authenticating to the SMTP server.EMAIL_PORT
: port to use for the SMTP server defined in EMAIL_HOST.EMAIL_USE_TLS
: whether to use an explicit TLS (secure) connection when talking to the SMTP server, generally used on port 587.EMAIL_USE_SSL
: whether to use an implicit TLS (secure) connection when talking to the SMTP server, generally used on port 465.Optional configuration:
+SLACK_TOKEN
: Slack token of your Slack application that will be used to send/receive notificationsDEFAULT_SLACK_CHANNEL
: ID of the Slack channel you want to post the message toGreedybear leverages a python client for interacting with ElasticSearch which requires to be at the exact major version of the related T-POT ElasticSearch instance. +This means that there could problems if those versions do not match.
+The actual version of the client installed is the 8.15.0 which allows to run TPOT version from 22.04.0 to 24.04.0 without any problems (and some later ones...we regularly check T-POT releases but we could miss one or two here.)
+If you want to have compatibility with previous versions, you need to change the elasticsearch-dsl
version here and re-build locally the project.
If you make some code changes and you'd like to rebuild the project, follow these steps:
+.env
file has a COMPOSE_FILE
variable which mounts the docker/local.override.yml
compose file.docker-compose build
to build the new docker image.docker-compose up
.To update the project with the most recent available code you have to follow these steps:
```bash
$ cd <your_greedy_bear_directory> # go into the project directory
$ git pull # pull new repository changes
$ docker pull intelowlproject/greedybear:prod # pull new docker images
$ docker-compose down # stop and destroy the currently running GreedyBear containers
$ docker-compose up # restart the GreedyBear application
```
The project goal is to extract data of the attacks detected by a TPOT or a cluster of them and to generate some feeds that can be used to prevent and detect attacks.
+ +There are public feeds provided by The Honeynet Project in this site: greedybear.honeynet.org. Example
To check all the available feeds, please refer to our usage guide.
+Please do not perform too many requests to extract feeds or you will be banned.
+If you want to be updated regularly, please download the feeds only once every 10 minutes (this is the time between each internal update).
+Since Greedybear v1.1.0 we added a Registration Page that can be used to manage Registration requests when providing GreedyBear as a Service.
After a user registers, an email is sent to them to verify their email address. If necessary, there are buttons on the login page to resend the verification email and to reset the password.
+Once the user has verified their email, they would be manually vetted before being allowed to use the GreedyBear platform. The registration requests would be handled in the Django Admin page by admins. +If you have GreedyBear deployed on an AWS instance you can use the SES service.
+In a development environment the emails that would be sent are written to the standard output.
+If you like, you could use Amazon SES for sending automated emails.
+First, you need to configure the environment variable AWS_SES
to True
to enable it.
+Then you have to add some credentials for AWS: if you have GreedyBear deployed on the AWS infrastructure, you can use IAM credentials:
+to allow that just set AWS_IAM_ACCESS
to True
. If that is not the case, you have to set both AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
.
Additionally, if you are not using the default AWS region of us-east-1, you need to specify your AWS_REGION
.
You can customize the AWS Region location of your services by changing the environment variable `AWS_REGION`. Default is `eu-central-1`.
GreedyBear is created with the aim of collecting information from the T-POTs and generating actionable feeds, so that they are easily accessible and act as valuable information to prevent and detect attacks.
+The feeds are reachable through the following URL:
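A sketch of the URL pattern, assembled from the parameters documented below (the exact route may differ per deployment):

```
https://<your_greedybear_instance>/api/feeds/<feed_type>/<attack_type>/<age>.<format>
```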
+ +The available feed_type are:
+log4j
: attacks detected from the Log4pot.cowrie
: attacks detected from the Cowrie Honeypot.all
: get all types at onceheralding
ciscoasa
honeytrap
dionaea
conpot
adbhoney
tanner
citrixhoneypot
mailoney
ipphoney
ddospot
elasticpot
dicompot
redishoneypot
sentrypeer
glutton
The available attack_type are:
+scanner
: IP addresses captured by the honeypots while performing attackspayload_request
: IP addresses and domains extracted from payloads that would have been executed after a speficic attack would have been successfulall
: get all types at onceThe available age are:
+recent
: most recent IOCs seen in the last 3 dayspersistent
: these IOCs are the ones that were seen regularly by the honeypots. This feeds will start empty once no prior data was collected and will become bigger over time.The available formats are:
+txt
: plain text (just one line for each IOC)csv
: CSV-like file (just one line for each IOC)json
: JSON file with additional information regarding the IOCsCheck the API specification or the to get all the details about how to use the available APIs.
+GreedyBear provides an easy-to-query API to get the information available in GB regarding the queried observable (domain or IP address).
+ +This "Enrichment" API is protected through authentication. Please reach out Matteo Lodi or another member of The Honeynet Project if you are interested in gain access to this API.
+If you would like to leverage this API without the need of writing even a line of code and together with a lot of other awesome tools, consider using IntelOwl.
+When you write or modify Python code in the codebase, it's important to add or update the docstrings accordingly. If you wish to display these docstrings in the documentation, follow these steps.
+Suppose the docstrings are located in the following path: docs/Submodules/IntelOwl/api_app/analyzers_manager/classes
, and you want to show the description of a class, such as BaseAnalyzerMixin.
To include this in the documentation, use the following command:
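A sketch of the usual mkdocstrings directive; the exact dotted identifier for this repository is an assumption derived from the path above:

```markdown
::: docs.Submodules.IntelOwl.api_app.analyzers_manager.classes.BaseAnalyzerMixin
```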
+ +Warning
Make sure both your path and your syntax are correct.
If you face any issues even though the path is correct, then read the Submodules Guide.
+ Bases: Plugin
Abstract Base class for Analyzers. +Never inherit from this branch, +always use either one of ObservableAnalyzer or FileAnalyzer classes.
+docs/Submodules/IntelOwl/api_app/analyzers_manager/classes.py
analyzer_name: str
*property*
+Returns the name of the analyzer.
+config_exception
*classmethod property*
+Returns the AnalyzerConfigurationException class.
+config_model
*classmethod property*
+Returns the AnalyzerConfig model.
+report_model
*classmethod property*
+Returns the AnalyzerReport model.
+after_run_success(content)
+Handles actions after a successful run.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `content` | `any` | The content to process after a successful run. | *required* |
docs/Submodules/IntelOwl/api_app/analyzers_manager/classes.py
get_exceptions_to_catch()
Returns additional exceptions to catch when running the `start` function.
+ +To set up and run the documentation site on your local machine, please follow the steps below:
+To create a virtual environment named venv
in your project directory, use the following command:
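The standard library `venv` module does this:

```bash
python3 -m venv venv
```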
Activate the virtual environment to ensure that all dependencies are installed locally within your project directory.
+On Linux/MacOS:
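The standard activation command:

```bash
source venv/bin/activate
```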
+ +On Windows:
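The equivalent on Windows (cmd or PowerShell):

```bash
venv\Scripts\activate
```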
+ +To install all the necessary Python packages listed in requirements.txt, run:
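Using the standard pip flow (assuming `requirements.txt` is in the repository root):

```bash
pip install -r requirements.txt
```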
+ +Please run these commands to update and fetch the local Submodules.
```bash
git submodule foreach --recursive 'git fetch --all'
git submodule update --init --remote --recursive --depth 1
git submodule sync --recursive
git submodule update --remote --recursive
```
Start a local development server to preview the documentation in your web browser. The server will automatically reload whenever you make changes to the documentation files.
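Assuming the site is built with MkDocs (as the mkdocstrings-style API pages in these docs suggest), the usual command is:

```bash
mkdocs serve
```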
+ +As you edit the documentation, you can view your changes in real-time through the local server. This step ensures everything looks as expected before deploying.
+Once you are satisfied with your changes, commit and push them to the GitHub repository. The documentation will be automatically deployed via GitHub Actions, making it live on the documentation site.
+This page includes details about some advanced features that Intel Owl provides which can be optionally configured by the administrator.
+Available for version > 6.1.0
+Right now only ElasticSearch v8 is supported.
+In the env_file_app_template
, you'd see various elasticsearch related environment variables. The user should spin their own Elastic Search instance and configure these variables.
- Set `ELASTIC_HOST` to the URL of the external instance.
- With the `--elastic` option you can run a container-based ElasticSearch instance. In this case `ELASTIC_HOST` must be set to `https://elasticsearch:9200`. Also configure `ELASTIC_PASSWORD`.
and delete
operations are auto-synced so you always have the latest data in ES.
With elasticsearch-py the AnalyzerReport, ConnectorReport and PivotReport objects are indexed into elasticsearch. In this way is possible to search data inside the report fields and many other via the UI. Each time IntelOwl is restarted the index template is updated and the every 5 minutes a task insert the reports in ElasticSearch.
+IntelOwl stores data that can be used for Business Intelligence purpose. +Since plugin reports are deleted periodically, this feature allows to save indefinitely small amount of data to keep track of how analyzers perform and user usage. +At the moment, the following information are sent to elastic:
+Documents are saved in the ELEASTICSEARCH_BI_INDEX-%YEAR-%MONTH
, allowing to manage the retention accordingly.
+To activate this feature, it is necessary to set ELASTICSEARCH_BI_ENABLED
to True
in the env_file_app
and
+ELASTICSEARCH_BI_HOST
to elasticsearch:9200
+or your elasticsearch server.
An index template is created after the first bulk submission of reports.
+IntelOwl provides support for some of the most common authentication methods:
+The first step is to create a Google Cloud Platform project, and then create OAuth credentials for it.
+It is important to add the correct callback in the "Authorized redirect URIs" section to allow the application to redirect properly after the successful login. Add this:
+ +After that, specify the client ID and secret as GOOGLE_CLIENT_ID
and GOOGLE_CLIENT_SECRET
environment variables and restart IntelOwl to see the applied changes.
Note
+While configuring Google Auth2 you can choose either to enable access to the all users with a Google Account ("External" mode) or to enable access to only the users of your organization ("Internal" mode). +Reference +IntelOwl leverages Django-auth-ldap to perform authentication via LDAP.
+How to configure and enable LDAP on Intel Owl?
+configuration/ldap_config.py
. This file is mounted as a docker volume, so you won't need to rebuild the image.Note
+For more details on how to configure this file, check the official documentation of the django-auth-ldap library. +LDAP_ENABLED
as True
in the environment configuration file env_file_app
.
+ Finally, you can restart the application with docker-compose up
IntelOwl leverages Django-radius to perform authentication +via RADIUS server.
+How to configure and enable RADIUS authentication on Intel Owl?
+configuration/radius_config.py
. This file is mounted as a
+ docker volume, so you won't need to rebuild the image.Note
+For more details on how to configure this file, check the official documentation of the django-radius library. +RADIUS_AUTH_ENABLED
as True
in the environment
+ configuration file env_file_app
. Finally, you can restart the application with docker-compose up
Like many other integrations that we have, we have an Analyzer and a Connector for the OpenCTI platform.
+This allows the users to leverage these 2 popular open source projects and frameworks together.
So why do we have a section here? Because there are various compatibility problems with the official PyCTI library.
We found out (see issues in IntelOwl and PyCTI) that, most of the time, the OpenCTI version of the server you are using and the pycti version installed in IntelOwl must match perfectly.
+Because of that, we decided to provide to the users the chance to customize the version of PyCTI installed in IntelOwl based on the OpenCTI version that they are using.
+To do that, you would need to leverage the option --pycti-version
provided by the ./start
helper:
--pycti-version
with ./start -h
./start test build --pycti-version <your_version>
./start test up -- --build
We have support for several AWS services.
+You can customize the AWS Region location of you services by changing the environment variable AWS_REGION
. Default is eu-central-1
You have to add some credentials for AWS: if you have IntelOwl deployed on the AWS infrastructure, you can use IAM credentials:
+to allow that just set AWS_IAM_ACCESS
to True
. If that is not the case, you have to set both AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
If you prefer to use S3 to store the analyzed samples, instead of the local storage, you can do it.
+First, you need to configure the environment variable LOCAL_STORAGE
to False
to enable it and set AWS_STORAGE_BUCKET_NAME
to the AWS bucket you want to use.
Then you need to configure permission access to the chosen S3 bucket.
+IntelOwl at the moment supports 3 different message brokers:
+The default broker, if nothing is specified, is Redis
.
To use RabbitMQ
, you must use the option --rabbitmq
when launching IntelOwl with the ./start
script.
To use AWS SQS
, you must use the option --sqs
when launching IntelOwl with the .start
script.
+In that case, you should create new FIFO SQS queues in AWS called intelowl-<environment>-<queue_name>.fifo
and give your instances on AWS the proper permissions to access it.
+Moreover, you must populate the AWS_USER_NUMBER
. This is required to connect in the right way to the selected SQS queues.
+Only FIFO queues are supported.
If you want to use a remote message broker (like an ElasticCache
or AmazonMQ
instance), you must populate the BROKER_URL
environment variable.
It is possible to use task priority inside IntelOwl: each user has a default priority of 10, and robot users (like the Ingestors) have a priority of 7.
+You can customize these priorities inside Django Admin, in the Authentication.User Profiles
section.
Redis
is used for two different functions:
For this reason, a Redis
instance is mandatory.
+You can personalize IntelOwl in two different way:
Redis
instance.This is the default behaviour.
+Redis
instance.You must use the option --use-external-redis
when launching IntelOwl with the .start
script.
+Moreover, you need to populate the WEBSOCKETS_URL
environment variable. If you are using Redis
as a message broker too, remember to populate the BROKER_URL
environment variable
If you like, you could use AWS RDS instead of PostgreSQL for your database. In that case, you should change the database required options accordingly: DB_HOST
, DB_PORT
, DB_USER
, DB_PASSWORD
and setup your machine to access the service.
If you have IntelOwl deployed on the AWS infrastructure, you can use IAM credentials to access the Postgres DB.
+To allow that just set AWS_RDS_IAM_ROLE
to True
. In this case DB_PASSWORD
is not required anymore.
Moreover, to avoid to run PostgreSQL locally, you would need to use the option --use-external-database
when launching IntelOwl with the ./start
script.
If you like, you could use Amazon SES for sending automated emails (password resets / registration requests, etc).
+You need to configure the environment variable AWS_SES
to True
to enable it.
You can use the "Secrets Manager" to store your credentials. In this way your secrets would be better protected.
+Instead of adding the variables to the environment file, you should just add them with the same name on the AWS Secrets Manager and Intel Owl will fetch them transparently.
+Obviously, you should have created and managed the permissions in AWS in advance and accordingly to your infrastructure requirements.
+Also, you need to set the environment variable AWS_SECRETS
to True
to enable this mode.
You can use a Network File System
for the shared_files that are downloaded runtime by IntelOwl (for example Yara rules).
To use this feature, you would need to add the address of the remote file system inside the .env
file,
+and you would need to use the option --nfs
when launching IntelOwl with the ./start
script.
Right now there is no official support for Kubernetes deployments.
+But we have an active community. Please refer to the following blog post for an example on how to deploy IntelOwl on Google Kubernetes Engine:
+Deploying Intel-Owl on GKE by Mayank Malik.
+IntelOwl provides an additional multi-queue.override.yml compose file allowing IntelOwl users to better scale with the performance of their own architecture.
+If you want to leverage it, you should add the option --multi-queue
when starting the project. Example:
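For example (option name from this section; the `prod` mode is an assumption, adapt to how you normally launch IntelOwl):

```bash
./start prod up --multi-queue
```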
This functionality is not enabled by default because this deployment would start 2 more containers, so the resource consumption is higher. We suggest using this option only when using IntelOwl massively.
+It is possible to define new celery workers: each requires the addition of a new container in the docker-compose file, as shown in the multi-queue.override.yml
.
Moreover, IntelOwl requires that the names of the workers be provided in the `docker-compose` file. This is done through the environment variable `CELERY_QUEUES` inside the `uwsgi` container. Each queue must be separated using the character `,`, as shown in the example.
One can customize which analyzer should use which queue by specifying so in the analyzer entry in the analyzer_config.json configuration file. If no queue(s) are provided, the default
queue will be selected.
IntelOwl provides an additional flower.override.yml compose file allowing IntelOwl users to use Flower features to monitor and manage queues and tasks
+If you want to leverage it, you should add the option --flower
when starting the project. Example:
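For example (same assumptions as the multi-queue example above):

```bash
./start prod up --flower
```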
The flower interface is available at port 5555: to set the credentials for its access, update the environment variables
+ +or change the .htpasswd
file that is created in the docker
directory in the intelowl_flower
container.
The ./start
script essentially acts as a wrapper over Docker Compose, performing additional checks.
+IntelOwl can still be started by using the standard docker compose
command, but all the dependencies have to be manually installed by the user.
The --project-directory
and -p
options are required to run the project.
+Default values set by ./start
script are "docker" and "intel_owl", respectively.
The startup is based on chaining various Docker Compose YAML files using -f
option.
+All Docker Compose files are stored in docker/
directory of the project.
+The default compose file, named default.yml
, requires configuration for an external database and message broker.
+In their absence, the postgres.override.yml
and rabbitmq.override.yml
files should be chained to the default one.
The command composed, considering what is said above (using sudo
), is
sudo docker compose --project-directory docker -f docker/default.yml -f docker/postgres.override.yml -f docker/rabbitmq.override.yml -p intel_owl up
+
The other most common compose file that can be used is for the testing environment.
+The equivalent of running ./start test up
is adding the test.override.yml
file, resulting in:
sudo docker compose --project-directory docker -f docker/default.yml -f docker/postgres.override.yml -f docker/rabbitmq.override.yml -f docker/test.override.yml -p intel_owl up
+
All other options available in the ./start
script (./start -h
to view them) essentially chain other compose file to docker compose
command with corresponding filenames.
IntelOwl includes integrations with some analyzers that are not enabled by default.
+These analyzers, stored under the integrations/
directory, are packed within Docker Compose files.
+The compose.yml
file has to be chained to include the analyzer.
+The additional compose-test.yml
file has to be chained for testing environment.
This page includes details about some advanced features that Intel Owl provides which can be optionally enabled. Namely,
+Starting from IntelOwl v4, a new "Organization" section is available on the GUI. This section substitute the previous permission management via Django Admin and aims to provide an easier way to manage users and visibility.
+Thanks to the "Organization" feature, IntelOwl can be used by multiple SOCs, companies, etc...very easily. +Right now it works very simply: only users in the same organization can see analysis of one another. An user can belong to an organization only.
+You can create a new organization by going to the "Organization" section, available under the Dropdown menu you cand find under the username.
+Once you create an organization, you are the unique "Owner" of that organization. So you are the only one who can delete the organization and promote/demote/kick users. +Another role, which is called "Admin", can be set to a user (via the Django Admin interface only for now). +Owners and admins share the following powers: they can manage invitations and the organization's plugin configuration.
+Once an invite has sent, the invited user has to login, go to the "Organization" section and accept the invite there. Afterwards the Administrator will be able to see the user in his "Organization" section.
+ +From IntelOwl v4.1.0, Plugin Parameters and Secrets can be defined at the organization level, in the dedicated section. +This allows to share configurations between users of the same org while allowing complete multi-tenancy of the application. +Only Owners and Admins of the organization can set, change and delete them.
+The org admin can disable a specific plugin for all the users in a specific org. +To do that, Org Admins needs to go in the "Plugins" section and click the button "Enabled for organization" of the plugin that they want to disable.
+ +Since IntelOwl v4.2.0 we added a Registration Page that can be used to manage Registration requests when providing IntelOwl as a Service.
+After a user registration has been made, an email is sent to the user to verify their email address. If necessary, there are buttons on the login page to resend the verification email and to reset the password.
+Once the user has verified their email, they would be manually vetted before being allowed to use the IntelOwl platform. The registration requests would be handled in the Django Admin page by admins. +If you have IntelOwl deployed on an AWS instance with an IAM role you can use the SES service.
+To have the "Registration" page to work correctly, you must configure some variables before starting IntelOwl. See Optional Environment Configuration
+In a development environment the emails that would be sent are written to the standard output.
Some analyzers, which run in their own Docker containers, are kept disabled by default to prevent accidentally starting too many containers and making your computer unresponsive.
| Name | Analyzers | Description |
| --- | --- | --- |
| Malware Tools Analyzers | | |
| TOR Analyzers | `Onionscan` | Scans TOR .onion domains for privacy leaks and information disclosures. |
| CyberChef | `CyberChef` | Run a transformation on a CyberChef server using pre-defined or custom recipes (rules that describe how the input has to be transformed). Check further instructions here |
| PCAP Analyzers | `Suricata` | You can upload a PCAP to have it analyzed by Suricata with the open Ruleset. The result will provide a list of the triggered signatures plus a more detailed report with all the raw data generated by Suricata. You can also add your own rules (see paragraph "Analyzers with special configuration"). The installation is optimized for scaling, so the execution time is really fast. |
| PhoneInfoga | `PhoneInfoga_scan` | PhoneInfoga is one of the most advanced tools to scan international phone numbers. It allows you to first gather basic information such as country, area, carrier and line type, then use various techniques to try to find the VoIP provider or identify the owner. It works with a collection of scanners that must be configured in order for the tool to be effective. PhoneInfoga doesn't automate everything, it's just there to help investigating on phone numbers. here |
| Phishing Analyzers | | This framework tries to render a potential phishing page and extract useful information from it. Also, if the page contains a form, it tries to submit the form using fake data. The goal is to extract IOCs and check whether the page is real phishing or not. |
To enable all the optional analyzers you can add the option --all_analyzers
when starting the project. Example:
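For example (the `prod` mode is an assumption, adapt to your usual launch command):

```bash
./start prod up --all_analyzers
```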
Otherwise, you can enable just one of the cited integrations by using the related option. Example:
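A hypothetical single-integration run; the `--tor_analyzers` flag name is an assumption mirroring the table above (check `./start -h` for the exact option names):

```bash
./start prod up --tor_analyzers
```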
+ +Some analyzers provide the chance to customize the performed analysis based on parameters that are different for each analyzer.
You can click on the "Runtime Configuration" button in the "Scan" page and add the runtime configuration in the form of a dictionary.
Example:
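For instance, mirroring the pyintelowl example below, entered in the UI form as JSON:

```json
{
  "Doc_Info": {
    "additional_passwords_to_check": ["passwd", "2020"]
  }
}
```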
+ +While using send_observable_analysis_request
or send_file_analysis_request
endpoints, you can pass the parameter runtime_configuration
with the optional values.
+Example:
```python
runtime_configuration = {
    "Doc_Info": {
        "additional_passwords_to_check": ["passwd", "2020"]
    }
}
pyintelowl_client.send_file_analysis_request(..., runtime_configuration=runtime_configuration)
```
PhoneInfoga provides several Scanners to extract as much information as possible from a given phone number. Those scanners may require authentication, so they are automatically skipped when no authentication credentials are found.
+By default the scanner used is local
.
Go through this guide to set up the other required API keys related to this analyzer.
You can either use pre-defined recipes or create your own as +explained here.
+To use a pre-defined recipe, set the predefined_recipe_name
argument to the name of the recipe as
+defined here. Else, leave the predefined_recipe_name
argument empty and set
+the custom_recipe
argument to the contents of
+the recipe you want to
+use.
Additionally, you can also (optionally) set the output_type
argument.
[{"op": "To Decimal", "args": ["Space", False]}]
The framework aims to be extendable and provides two different playbooks connected through a pivot.
+The first playbook, named PhishingExtractor
, is in charge of extracting useful information from the web page rendered with Selenium-based browser.
+The second playbook is called PhishingAnalysis
and its main purposes are to extract useful insights on the page itself
+and to try to submit forms with fake data to extract other IOCs.
XPath syntax is used to find elements in the page. These selectors are customizable via the plugin's config page.
+The parameter xpath_form_selector
controls how the form is retrieved from the page and xpath_js_selector
is used to search
+for JavaScript inside the page.
A mapping is used in order to fill in the page with fake data. This is due to the fact that most input tags of type "text"
+do not have a specific role in the page, so there must be some degree of approximation.
+This behaviour is controlled through *-mapping
parameters. They are a list that must contain the input tag's name to
+compile with fake data.
Here is an example of what a phishing investigation looks like started from PhishingExtractor
playbook:
+
Some analyzers could require a special configuration:
+GoogleWebRisk
: this analyzer needs a service account key with the Google Cloud credentials to work properly.
+ You should follow the official guide for creating the key.
+ Then you can populate the secret service_account_json
for that analyzer with the JSON of the service account file.ClamAV
: this Docker-based analyzer uses clamd
daemon as its scanner and is communicating with clamdscan
utility to scan files. The daemon requires 2 different configuration files: clamd.conf
(daemon's config) and freshclam.conf
(virus database updater's config). These files are mounted as docker volumes in /integrations/malware_tools_analyzers/clamav
and hence, can be edited by the user as per needs, without restarting the application. Moreover ClamAV is integrated with unofficial open source signatures extracted with Fangfrisch. The configuration file fangfrisch.conf
is mounted in the same directory and can be customized on your wish. For instance, you should change it if you want to integrate open source signatures from SecuriteInfoSuricata
: you can customize the behavior of Suricata:
/integrations/pcap_analyzers/config/suricata/rules
: here there are Suricata rules. You can change the custom.rules
files to add your own rules at any time. Once you made this change, you need to either restart IntelOwl or (this is faster) run a new analysis with the Suricata analyzer and set the parameter reload_rules
to true
./integrations/pcap_analyzers/config/suricata/etc
: here there are Suricata configuration files. Change it based on your wish. Restart IntelOwl to see the changes applied.Yara
:repositories
parameter and private_repositories
secret to download and use different rules from the default that IntelOwl currently support.repositories
values is what will be used to actually run the analysis: if you have added private repositories, remember to add the url in repositories
too!/opt/deploy/files_required/yara/YOUR_USERNAME/custom_rules/
. Please remember that these rules are not synced in a cluster deploy: for this reason is advised to upload them on GitHub and use the repositories
or private_repositories
attributes.Since v4, IntelOwl integrated the notification system from the certego_saas
package, allowing the admins to create notification that every user will be able to see.
The user would find the Notifications button on the top right of the page:
+ +There the user can read notifications provided by either the administrators or the IntelOwl Maintainers.
+As an Admin, if you want to add a notification to have it sent to all the users, you have to login to the Django Admin interface, go to the "Notifications" section and add it there.
+While adding a new notification, in the body
section it is possible to even use HTML syntax, allowing to embed images, links, etc;
+in the app_name field
, please remember to use intelowl
as the app name.
Everytime a new release is installed, once the backend goes up it will automatically create a new notification, +having as content the latest changes described in the CHANGELOG.md, +allowing the users to keep track of the changes inside intelowl itself.
+ask_analysis_availability
API endpoint to check for existing analysis based on an MD5 hash.
+This endpoint helps avoid redundant analysis by checking if there is already an analysis +in progress or completed with status "running" or "reported_without_fails" for the provided MD5 hash. +The analyzers that need to be executed should be specified to ensure expected results.
+Deprecated: This endpoint will be deprecated after 01-07-2023.
+Parameters: +- request (POST): Contains the MD5 hash and analyzer details.
+Returns: +- 200: JSON response with the analysis status, job ID, and analyzers to be executed.
+docs/Submodules/IntelOwl/api_app/views.py
ask_multi_analysis_availability
API endpoint to check for existing analysis for multiple MD5 hashes.
+Similar to ask_analysis_availability
, this endpoint checks for existing analysis for multiple MD5 hashes.
+It prevents redundant analysis by verifying if there are any jobs in progress or completed with status
+"running" or "reported_without_fails". The analyzers required should be specified to ensure accurate results.
Parameters: +- request (POST): Contains multiple MD5 hashes and analyzer details.
+Returns: +- 200: JSON response with the analysis status, job IDs, and analyzers to be executed for each MD5 hash.
+docs/Submodules/IntelOwl/api_app/views.py
analyze_file
API endpoint to start an analysis job for a single file.
+This endpoint initiates an analysis job for a single file and sends it to the +specified analyzers. The file-related information and analyzers should be provided +in the request data.
+Parameters: +- request (POST): Contains file data and analyzer details.
+Returns: +- 200: JSON response with the job details after initiating the analysis.
+docs/Submodules/IntelOwl/api_app/views.py
analyze_multiple_files
API endpoint to start analysis jobs for multiple files.
+This endpoint initiates analysis jobs for multiple files and sends them to the specified analyzers. +The file-related information and analyzers should be provided in the request data.
+Parameters: +- request (POST): Contains multiple file data and analyzer details.
+Returns: +- 200: JSON response with the job details for each initiated analysis.
+docs/Submodules/IntelOwl/api_app/views.py
analyze_observable
API endpoint to start an analysis job for a single observable.
+This endpoint initiates an analysis job for a single observable (e.g., domain, IP, URL, etc.) +and sends it to the specified analyzers. The observable-related information and analyzers should be +provided in the request data.
+Parameters: +- request (POST): Contains observable data and analyzer details.
+Returns: +- 200: JSON response with the job details after initiating the analysis.
+docs/Submodules/IntelOwl/api_app/views.py
analyze_multiple_observables
API endpoint to start analysis jobs for multiple observables.
+This endpoint initiates analysis jobs for multiple observables (e.g., domain, IP, URL, etc.) +and sends them to the specified analyzers. The observables and analyzer details should +be provided in the request data.
+Parameters: +- request (POST): Contains multiple observable data and analyzer details.
+Returns: +- 200: JSON response with the job details for each initiated analysis.
+docs/Submodules/IntelOwl/api_app/views.py
CommentViewSet
+ Bases: ModelViewSet
CommentViewSet provides the following actions:
+Permissions: +- IsAuthenticated: Requires authentication for all actions. +- IsObjectUserPermission: Allows only the comment owner to update or delete the comment. +- IsObjectUserOrSameOrgPermission: Allows the comment owner or anyone in the same organization to retrieve the comment.
+Queryset: +- Filters comments to include only those associated with jobs visible to the authenticated user.
+docs/Submodules/IntelOwl/api_app/views.py
get_permissions()
+Customizes permissions based on the action being performed.
+destroy
, update
, and partial_update
actions, adds IsObjectUserPermission
to ensure that only
+ the comment owner can perform these actions.retrieve
action, adds IsObjectUserOrSameOrgPermission
to allow the comment owner or anyone in the same
+ organization to retrieve the comment.Returns: +- List of applicable permissions.
+docs/Submodules/IntelOwl/api_app/views.py
get_queryset()
+Filters the queryset to include only comments related to jobs visible to the authenticated user.
+Returns: +- Filtered queryset of comments.
+docs/Submodules/IntelOwl/api_app/views.py
JobViewSet
+ Bases: ReadAndDeleteOnlyViewSet
, SerializerActionMixin
JobViewSet provides the following actions:
+Permissions: +- IsAuthenticated: Requires authentication for all actions. +- IsObjectUserOrSameOrgPermission: Allows job deletion or killing only by the job owner or anyone in the same organization.
+Queryset: +- Prefetches related tags and orders jobs by request time, filtered to include only jobs visible to the authenticated user.
+docs/Submodules/IntelOwl/api_app/views.py
__aggregation_response_dynamic(field_name, group_by_date=True, limit=5, users=None)
+Dynamically aggregate Job objects based on a specified field and time range.
+This method identifies the most frequent values of a given field within +a specified time range and aggregates the Job objects accordingly. +Optionally, it can group the results by date and limit the number of +most frequent values.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `field_name` | `str` | The name of the field to aggregate by. | *required* |
| `group_by_date` | `bool` | Whether to group the results by date. | `True` |
| `limit` | `int` | The maximum number of most frequent values to retrieve. | `5` |
| `users` | `list` | A list of users to filter the Job objects by. | `None` |
Returns:

| Name | Type | Description |
| --- | --- | --- |
| Response | `Response` | A Django REST framework Response object containing the most frequent values and the aggregated data. |
docs/Submodules/IntelOwl/api_app/views.py
__aggregation_response_static(annotations, users=None)
Generate a static aggregation of Job objects filtered by a time range.
This method applies the provided annotations to aggregate Job objects within the specified time range. Optionally, it filters the results by the given list of users.

Parameters:

- `annotations` (`dict`, required): Annotations to apply for the aggregation.
- `users` (`list`, default `None`): A list of users to filter the Job objects by.

Returns:

- `Response`: A Django REST framework Response object containing the aggregated data.
docs/Submodules/IntelOwl/api_app/views.py
__parse_range(request)
*staticmethod*

Parse the time range from the request query parameters.
This method attempts to extract the 'range' query parameter from the request. If the parameter is not provided, it defaults to '7d' (7 days).

Parameters:

- `request` (required): The HTTP request object containing query parameters.

Returns:

- `tuple`: A tuple containing the parsed time delta and the basis for date truncation.
docs/Submodules/IntelOwl/api_app/views.py
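To make the default concrete, the following is a minimal sketch of what such a parser does. This is an illustration only: the real implementation lives in `api_app/views.py` and also derives the basis for date truncation, which is omitted here, and the exact set of accepted units is an assumption.

```python
from datetime import timedelta

UNITS = {"d": "days", "h": "hours"}  # assumed units, for illustration only

def parse_range(range_str: str = "7d") -> timedelta:
    # "7d" -> timedelta(days=7); "24h" -> timedelta(hours=24)
    amount, unit = int(range_str[:-1]), range_str[-1]
    return timedelta(**{UNITS[unit]: amount})

print(parse_range())       # 7 days, 0:00:00 (the documented default)
print(parse_range("30d"))  # 30 days, 0:00:00
```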
aggregate_file_mimetype(request)
Aggregate jobs by file MIME type.

Returns:

- Aggregated count of jobs for each MIME type.
+docs/Submodules/IntelOwl/api_app/views.py
aggregate_md5(request)
Aggregate jobs by MD5 hash.

Returns:

- Aggregated count of jobs for each MD5 hash.
+docs/Submodules/IntelOwl/api_app/views.py
aggregate_observable_classification(request)
Aggregate jobs by observable classification.

Returns:

- Aggregated count of jobs for each observable classification.
+docs/Submodules/IntelOwl/api_app/views.py
aggregate_observable_name(request)
Aggregate jobs by observable name.

Returns:

- Aggregated count of jobs for each observable name.
+docs/Submodules/IntelOwl/api_app/views.py
aggregate_status(request)
Aggregate jobs by their status.

Returns:

- Aggregated count of jobs for each status.
+docs/Submodules/IntelOwl/api_app/views.py
aggregate_type(request)
Aggregate jobs by type (file or observable).

Returns:

- Aggregated count of jobs for each type.
+docs/Submodules/IntelOwl/api_app/views.py
download_sample(request, pk=None)
Download a sample associated with a job.
If the job does not have a sample, raises a validation error.

Parameters:

- `pk` (job_id): The ID of the job whose sample should be downloaded.

Returns:

- The file associated with the job as an attachment (bytes).
+docs/Submodules/IntelOwl/api_app/views.py
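As a client-side illustration, a sample could be fetched roughly like this. The base URL, token, and job ID are placeholders, and the route is an assumption based on the action name above; check your instance's API schema for the authoritative path.

```python
import requests

BASE_URL = "https://intelowl.example.com"              # placeholder
HEADERS = {"Authorization": "Token <your-api-token>"}  # placeholder

job_id = 123  # hypothetical job that has a file sample
response = requests.get(f"{BASE_URL}/api/jobs/{job_id}/download_sample", headers=HEADERS)
response.raise_for_status()

with open("sample.bin", "wb") as f:
    f.write(response.content)  # the sample is returned as raw bytes
```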
get_org_members(request)
*staticmethod*

Retrieve members of the organization associated with the authenticated user.
If the 'org' query parameter is set to 'true', this method returns all users who are members of the authenticated user's organization.

Parameters:

- `request` (required): The HTTP request object containing user information and query parameters.

Returns:

- `list` or `None`: A list of users who are members of the user's organization if the 'org' query parameter is 'true', otherwise `None`.
docs/Submodules/IntelOwl/api_app/views.py
get_permissions()
Customizes permissions based on the action being performed.
For the `destroy` and `kill` actions, adds `IsObjectUserOrSameOrgPermission` to ensure that only the job owner or anyone in the same organization can perform these actions.

Returns:

- List of applicable permissions.
+docs/Submodules/IntelOwl/api_app/views.py
get_queryset()
+Filters the queryset to include only jobs visible to the authenticated user, ordered by request time.
+Logs the request parameters and returns the filtered queryset.
Returns:

- Filtered queryset of jobs.
+docs/Submodules/IntelOwl/api_app/views.py
kill(request, pk=None)
+Kill a running job by closing celery tasks and marking the job as killed.
+If the job is not running, raises a validation error.
Returns:

- No content (204) if the job is successfully killed.
+docs/Submodules/IntelOwl/api_app/views.py
pivot(request, pk=None, pivot_config_pk=None)
+Perform a pivot operation from a job's reports based on a specified pivot configuration.
Expects the following parameters:

- `pivot_config_pk`: The primary key of the pivot configuration to use.

Returns:

- List of job IDs created as a result of the pivot.
+docs/Submodules/IntelOwl/api_app/views.py
recent_scans(request)
+Retrieve recent jobs based on an MD5 hash, filtered by a maximum temporal distance.
Expects the following parameters in the request data:

- `md5`: The MD5 hash to filter jobs by.
- `max_temporal_distance`: The maximum number of days to look back for recent jobs (default is 14 days).

Returns:

- List of recent jobs matching the MD5 hash.
+docs/Submodules/IntelOwl/api_app/views.py
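For example, a client could query this endpoint roughly as follows. The base URL and token are placeholders, and the route is an assumption based on the standard registration of this viewset; check your instance's API schema for the authoritative path.

```python
import requests

BASE_URL = "https://intelowl.example.com"              # placeholder
HEADERS = {"Authorization": "Token <your-api-token>"}  # placeholder

# Ask for jobs matching this MD5 seen in the last 7 days.
payload = {"md5": "9e107d9d372bb6826bd81d3542a419d6", "max_temporal_distance": 7}
response = requests.post(f"{BASE_URL}/api/jobs/recent_scans", json=payload, headers=HEADERS)
response.raise_for_status()

for job in response.json():  # list of recent jobs matching the MD5 hash
    print(job)
```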
recent_scans_user(request)
+Retrieve recent jobs for the authenticated user, filtered by sample status.
Expects the following parameters in the request data:

- `is_sample`: Whether to filter jobs by sample status (required).
- `limit`: The maximum number of recent jobs to return (default is 5).

Returns:

- List of recent jobs for the user.
+docs/Submodules/IntelOwl/api_app/views.py
retry(request, pk=None)
+Retry a job if its status is in a final state.
+If the job is currently running, raises a validation error.
Returns:

- No content (204) if the job is successfully retried.
+docs/Submodules/IntelOwl/api_app/views.py
TagViewSet
+ Bases: ModelViewSet
A viewset that provides CRUD (Create, Read, Update, Delete) operations for the `Tag` model.
This viewset leverages Django REST framework's `ModelViewSet` to handle requests for the `Tag` model. It includes the default implementations for the `list`, `retrieve`, `create`, `update`, `partial_update`, and `destroy` actions.

Attributes:

- `queryset` (`QuerySet`): The queryset that retrieves all Tag objects from the database.
- `serializer_class` (`Serializer`): The serializer class used to convert Tag model instances to JSON and vice versa.
- `pagination_class`: Pagination is disabled for this viewset.
docs/Submodules/IntelOwl/api_app/views.py
ModelWithOwnershipViewSet
+ Bases: ModelViewSet
A viewset that enforces ownership-based access control for models.
This class extends the functionality of `ModelViewSet` to restrict access to objects based on ownership. It modifies the queryset for the `list` action to only include objects visible to the requesting user, and adds custom permission checks for the `destroy` and `update` actions.

Methods:

- `get_queryset`: Returns the queryset of the model, filtered for visibility to the requesting user during the `list` action.
- `get_permissions`: Returns the permissions required for the current action, with additional checks for ownership during `destroy` and `update`.
docs/Submodules/IntelOwl/api_app/views.py
get_permissions()
Retrieves the permissions required for the current action.
For the `destroy` and `update` actions, additional checks are performed to ensure that only object owners or admins can perform these actions. Raises a `PermissionDenied` exception for `PUT` requests.

Returns:

- `list`: A list of permission instances.
docs/Submodules/IntelOwl/api_app/views.py
get_queryset()
Retrieves the queryset for the viewset, modifying it for the `list` action to only include objects visible to the requesting user.

Returns:

- `QuerySet`: The queryset of the model, possibly filtered for visibility.
docs/Submodules/IntelOwl/api_app/views.py
PluginConfigViewSet
+ Bases: ModelWithOwnershipViewSet
A viewset for managing `PluginConfig` objects with ownership-based access control.
This viewset extends `ModelWithOwnershipViewSet` to handle `PluginConfig` objects, allowing users to list, retrieve, and delete configurations while ensuring that only authorized configurations are accessible. It customizes the queryset to exclude default values and orders the configurations by ID.

Attributes:

- `serializer_class` (class): The serializer class used for `PluginConfig` objects.
- `pagination_class` (class): Specifies that pagination is not applied.
- `queryset` (`QuerySet`): The queryset for `PluginConfig` objects.

Methods:

- `get_queryset`: Returns the queryset for `PluginConfig` objects, excluding default values and ordering the results by ID.
docs/Submodules/IntelOwl/api_app/views.py
get_queryset()
Retrieves the queryset for `PluginConfig` objects, excluding those with default values (where the owner is `NULL`) and ordering the remaining objects by ID.

Returns:

- `QuerySet`: The filtered and ordered queryset of `PluginConfig` objects.
docs/Submodules/IntelOwl/api_app/views.py
PythonReportActionViewSet
+ Bases: GenericViewSet
A base view set for handling actions related to plugin reports.
This view set provides methods for killing and retrying plugin reports, and requires users to have appropriate permissions based on `IsObjectUserOrSameOrgPermission`.

Attributes:

- `permission_classes` (`list`): List of permission classes to apply.

Methods:

- `get_queryset`: Returns the queryset of reports based on the model class.
- `get_object`: Retrieves a specific report object by job_id and report_id.
- `perform_kill`: Kills a running plugin by terminating its Celery task and marking it as killed.
- `perform_retry`: Retries a failed or killed plugin run.
- `kill`: Handles the endpoint to kill a specific report.
- `retry`: Handles the endpoint to retry a specific report.
+docs/Submodules/IntelOwl/api_app/views.py
report_model
*abstractmethod classmethod property*

Abstract property that should return the model class for the report.
Subclasses must implement this property to specify the model class for the reports being handled by this view set.

Returns:

- `Type[AbstractReport]`: The model class for the report.

Raises:

- `NotImplementedError`: If not overridden by a subclass.
get_object(job_id, report_id)
Retrieves a specific report object by job_id and report_id.
Overrides DRF's default `get_object` method to fetch a report object based on job_id and report_id, and checks the permissions for the object.

Parameters:

- `job_id` (`int`, required): The ID of the job associated with the report.
- `report_id` (`int`, required): The ID of the report.

Returns:

- `AbstractReport`: The report object.

Raises:

- `NotFound`: If the report does not exist.
docs/Submodules/IntelOwl/api_app/views.py
get_queryset()
Returns the queryset of reports based on the model class.
Filters the queryset to return all instances of the report model.

Returns:

- `QuerySet`: A queryset of all report instances.
docs/Submodules/IntelOwl/api_app/views.py
kill(request, job_id, report_id)
Kills a specific report by terminating its Celery task and marking it as killed.
This endpoint handles the PATCH request to kill a report if its status is running or pending.

Parameters:

- `request` (`HttpRequest`, required): The request object containing the HTTP PATCH request.
- `job_id` (`int`, required): The ID of the job associated with the report.
- `report_id` (`int`, required): The ID of the report.

Returns:

- `Response`: HTTP 204 No Content if successful.

Raises:

- `ValidationError`: If the report is not in a valid state for killing.
docs/Submodules/IntelOwl/api_app/views.py
perform_kill(report)
*staticmethod*

Kills a running plugin by terminating its Celery task and marking it as killed.
This method is a callback for performing additional actions after a kill operation, including updating the report status and cleaning up the associated job.

Parameters:

- `report` (`AbstractReport`, required): The report to be killed.
docs/Submodules/IntelOwl/api_app/views.py
perform_retry(report)
*staticmethod*

Retries a failed or killed plugin run.
This method clears the errors and re-runs the plugin with the same arguments. It fetches the appropriate task signature and schedules the job again.

Parameters:

- `report` (`AbstractReport`, required): The report to be retried.

Raises:

- `RuntimeError`: If unable to find a valid task signature for the report.
docs/Submodules/IntelOwl/api_app/views.py
retry(request, job_id, report_id)
Retries a failed or killed plugin run.
This method clears the errors and re-runs the plugin with the same arguments. It fetches the appropriate task signature and schedules the job again.

Parameters:

- `report` (`AbstractReport`, required): The report to be retried.

Raises:

- `RuntimeError`: If unable to find a valid task signature for the report.
docs/Submodules/IntelOwl/api_app/views.py
AbstractConfigViewSet
+ Bases: PaginationMixin
, ReadOnlyModelViewSet
A base view set for handling plugin configuration actions.
This view set provides methods for enabling and disabling plugins within an organization. It requires users to be authenticated and to have appropriate permissions.

Attributes:

- `permission_classes` (`list`): List of permission classes to apply.
- `ordering` (`list`): Default ordering for the queryset.
- `lookup_field` (`str`): Field to look up in the URL.

Methods:

- `disable_in_org`: Disables the plugin for the organization of the authenticated user.
- `enable_in_org`: Enables the plugin for the organization of the authenticated user.
docs/Submodules/IntelOwl/api_app/views.py
disable_in_org(request, name=None)
Disables the plugin for the organization of the authenticated user.
Only organization admins can disable the plugin. If the plugin is already disabled, a validation error is raised.

Parameters:

- `request` (`Request`, required): The HTTP request object.
- `name` (`str`, default `None`): The name of the plugin.

Returns:

- `Response`: HTTP response indicating the success or failure of the operation.
docs/Submodules/IntelOwl/api_app/views.py
enable_in_org(request, name=None)
Enables the plugin for the organization of the authenticated user.
Only organization admins can enable the plugin. If the plugin is already enabled, a validation error is raised.

Parameters:

- `request` (`Request`, required): The HTTP request object.
- `name` (`str`, default `None`): The name of the plugin.

Returns:

- `Response`: HTTP response indicating the success or failure of the operation.
docs/Submodules/IntelOwl/api_app/views.py
PythonConfigViewSet
+ Bases: AbstractConfigViewSet
A view set for handling actions related to Python plugin configurations.
This view set provides methods to perform health checks and pull updates for Python-based plugins. It inherits from `AbstractConfigViewSet` and requires users to be authenticated.

Attributes:

- `serializer_class` (class): Serializer class for the view set.

Methods:

- `health_check`: Checks if the server instance associated with the plugin is up.
- `pull`: Pulls updates for the plugin.
docs/Submodules/IntelOwl/api_app/views.py
get_queryset()
Returns a queryset of all PythonConfig instances with related python_module parameters pre-fetched.

Returns:

- `QuerySet`: A queryset of PythonConfig instances.
docs/Submodules/IntelOwl/api_app/views.py
health_check(request, name=None)
Checks the health of the server instance associated with the plugin.
This method attempts to check if the plugin's server instance is up and running. It uses the `health_check` method of the plugin's Python class.

Parameters:

- `request` (`Request`, required): The HTTP request object.
- `name` (`str`, default `None`): The name of the plugin.

Returns:

- `Response`: HTTP response with the health status of the plugin.

Raises:

- `ValidationError`: If no health check is implemented or if an unexpected exception occurs.
docs/Submodules/IntelOwl/api_app/views.py
pull(request, name=None)
Pulls updates for the plugin.
This method attempts to pull updates for the plugin by calling the `update` method of the plugin's Python class. It also handles any exceptions that occur during this process.

Parameters:

- `request` (`Request`, required): The HTTP request object.
- `name` (`str`, default `None`): The name of the plugin.

Returns:

- `Response`: HTTP response with the update status of the plugin.

Raises:

- `ValidationError`: If the update is not implemented or if an unexpected exception occurs.
docs/Submodules/IntelOwl/api_app/views.py
plugin_state_viewer
View to retrieve the state of plugin configurations for the requesting user’s organization.
This endpoint is accessible only to users with an active membership in an organization. It returns a JSON response with the state of each plugin configuration, specifically indicating whether each plugin is disabled.

Parameters:

- `request` (`HttpRequest`, required): The request object containing the HTTP GET request.

Returns:

- `Response`: A JSON response with the state of each plugin configuration, indicating whether it is disabled or not.

Raises:

- `PermissionDenied`: If the requesting user does not belong to any organization.
docs/Submodules/IntelOwl/api_app/views.py
There are a lot of different ways you could choose to contribute to the IntelOwl Project:
Intel Owl welcomes contributors from anywhere and from any kind of education or skill level. We strive to create a community of developers that is welcoming, friendly and respectful.
For this reason it is important to follow some easy rules based on a simple but important concept: Respect.
Keeping a consistent code style throughout the project makes it easier to contribute and collaborate. We make use of psf/black and isort for code formatting and flake8 for style checks.
This guide assumes that you have already performed the steps required to install the project. If not, please do it (Installation Guide).
Create a personal fork of the project on GitHub. Then create a new branch based on the develop branch, which contains the most recent changes. This is mandatory.
+git checkout -b myfeature develop
Then we strongly suggest configuring pre-commit so that the linters run on every commit you make:
# from the project base directory
python3 -m venv venv
source venv/bin/activate
pip install pre-commit
pre-commit install

# create .env file for controlling repo_downloader.sh
# (to speed up image builds during development: it avoids downloading some repos)
cp docker/.env.start.test.template docker/.env.start.test

# set STAGE env variable to "local"
sed -i "s/STAGE=\"production\"/STAGE=\"local\"/g" docker/env_file_app
Now you can execute IntelOwl in development mode by selecting the `test` mode while launching the startup script:
Every time you perform a change, you should perform an operation to reflect the changes into the application: for Python code changes, `uwsgi` picks them up while using the `test` mode.
If you made any changes to an existing model/serializer/view, please run the following command to generate a new version of the API schema and docs:
+docker exec -it intelowl_uwsgi python manage.py spectacular --file docs/source/schema.yml && make html
+
To start the frontend in "develop" mode, you can execute the startup npm script within the `frontend` folder:
cd frontend/
# Install
npm i
# Start
DANGEROUSLY_DISABLE_HOST_CHECK=true npm start
# See https://create-react-app.dev/docs/proxying-api-requests-in-development/#invalid-host-header-errors-after-configuring-proxy for why we use that flag in development mode
Most of the time you would need to test the changes you made together with the backend. In that case, you would need to run the backend locally too:
+ +Note
+prod
would be faster because you would leverage the official images and you won't need to build the backend locally. In case you would need to test backend changes too at the same time, please use test
and refer to the previous section of the documentation.proxy
in the frontend/package.json
configurationThe IntelOwl Frontend is tightly linked to the certego-ui
library. Most of the React components are imported from there. Because of this, it may happen that, during development, you would need to work on that library too.
+To install the certego-ui
library, please take a look to npm link and remember to start certego-ui without installing peer dependencies (to avoid conflicts with IntelOwl dependencies):
git clone https://github.com/certego/certego-ui.git
# change directory to the folder where you have cloned the library
cd certego-ui/
# install, without peer deps (to use packages of IntelOwl)
npm i --legacy-peer-deps
# create link to the project (this will globally install this package)
sudo npm link
# compile the library
npm start
Then, open another command line tab, create a link in the frontend
to the certego-ui
and re-install and re-start the frontend application (see previous section):
This trick will allow you to see every change you make in `certego-ui` reflected directly in the running `frontend` application.
The certego-ui
application comes with an example project that showcases the components that you can re-use and import to other projects, like IntelOwl:
# To have the Example application working correctly, be sure to have installed `certego-ui` *without* the `--legacy-peer-deps` option and to have it started in another command line
cd certego-ui/
npm i
npm start
# go to another tab
cd certego-ui/example/
npm i
npm start
IntelOwl was designed to ease the addition of new plugins. With a simple python script you can integrate your own engine or integrate an external service in a short time.
+There are two possible cases:
+If you are doing the step number 2
, you can skip this paragraph.
First, you need to create the python code that will be actually executed. You can easily take other plugins as example to write this.
+Then, you have to create a Python Module
model. You can do this in the Django Admin
page:
+You have to specify which type of Plugin you wrote, and its python module. Again, you can use as an example an already configured Python Module
.
Some Python Module
requires to update some part of its code in a schedule way: for example Yara
requires to update the rule repositories, QuarkEngine
to update its database and so on.
+If the Python Module
that you define need this type of behaviour, you have to configure two things:
update
and put the updating logic (see other plugins for examples) there.update_schedule
(crontab syntax) that define when the update should be executed.Some Python Module
requires further check to see if the service provider is able to answer requests; for example if you have done too many requests, or the website is currently down for maintenance and so on.
+If the Python Module
that you define need this type of behaviour, you have to configure two things:
health_check
and put there the custom health check logic. As default, plugins will try to make an HTTP HEAD
request to the configured url (the Plugin must have a url
attribute).health_check_schedule
(crontab syntax) that define when the health check should be executed.Press Save and continue editing
to, at the moment, manually ad the Parameters
that the python code requires (the class attributes that you needed):
_
wil be prepended to the name)string
, list
, dict
, integer
, boolean
, float
true
or false
, meaning that a value is necessary to allow the run of the analyzertrue
or false
At this point, you can follow the specific guide for each plugin
+You may want to look at a few existing examples to start to build a new one, such as:
+FREE_TO_USE_ANALYZERS
playbook. To do this you have to make a migration file; you can use 0026_add_mmdb_analyzer_free_to_use
as a template.After having written the new python module, you have to remember to:
+file_analyzers
or observable_analyzers
directory based on what it can analyze_monkeypatch()
in its class to create automated tests for the new analyzer. This is a trick to have tests in the same class of its analyzer.Analyzers_manager/AnalyzerConfigs
(* = mandatory, ~ = mandatory on conditions)observable
or file
type
is observable
type
is file
and not supported filetypes
is emptyrun hash
is True
type
is file
and supported filetypes
is emptyIf the analyzer you wish to integrate doesn't exist as a public API or python package, it should be integrated with its own docker image +which can be queried from the main Django app.
+./integrations/<analyzer_name>/Dockerfile
.compose.yml
for production and compose-tests.yml
for testing should be placed under ./integrations/<analyzer_name>
.docker/env_file_integrations_template
.You may want to look at a few existing examples to start to build a new one:
+After having written the new python module, you have to remember to:
+connectors
directory_monkeypatch()
in its class to create automated tests for the new connector. This is a trick to have tests in the same class of its connector.Connectors_manager/ConnectorConfigs
(* = mandatory, ~ = mandatory on conditions)ingestors
directory_monkeypatch()
in its class to create automated tests for the new ingestor. This is a trick to have tests in the same class of its ingestor.Ingestors_manager/IngestorConfigs
(* = mandatory, ~ = mandatory on conditions)plus
symbol.pivots
directory_monkeypatch()
in its class to create automated tests for the new pivot. This is a trick to have tests in the same class of its pivot.Pivots_manager/PivotConfigs
(* = mandatory, ~ = mandatory on conditions)Most of the times you don't need to create a new Pivot Module. There are already some base modules that can be extended.
+The most important ones are the following 2:
+- 1.AnyCompare
: use this module if you want to create a custom Pivot from a specific value extracted from the results of the analyzers/connectors. How? you should populate the parameter field_to_compare
with the dotted path to the field you would like to extract the value from.
+- 2.SelfAnalyzable
: use this module if you want to create a custom Pivot that would analyze again the same observable/file.
visualizers
directory_monkeypatch()
in its class to create automated tests for the new visualizer. This is a trick to have tests in the same class of its visualizer.Visualizers_manager/VisualizerConfigs
(* = mandatory, ~ = mandatory on conditions)The visualizers' python code could be not immediate, so a small digression on how it works is necessary.
+Visualizers have as goal to create a data structure inside the Report
that the frontend is able to parse and correctly visualize on the page.
+To do so, some utility classes have been made:
Class | +Description | +Visual representation/example | +
---|---|---|
VisualizablePage | +A single page of the final report, made of different levels. Each page added is represented as a new tab in frontend. | ++ |
VisualizableLevel | ++ Each level corresponds to a line in the final frontend visualizations. Every level is made of a + VisualizableHorizontalList. + The dimension of the level can be customized with the size parameter (1 is the biggest, 6 is the smallest). + | ++ |
VisualizableHorizontalList | +An horizontal list of visualizable elements. In the example there is an horizontal list of vertical lists. | ++ |
VisualizableVerticalList | +A vertical list made of a name, a title, and the list of elements. | ++ |
VisualizableTable | +A table of visualizable elements. In the example there is a table of base and vertical lists. | ++ |
VisualizableBool | +The representation of a boolean value. It can be enabled or disabled with colors. | ++ |
VisualizableTitle | +The representation of a tuple, composed of a title and a value. | ++ |
VisualizableBase | +The representation of a base string. Can have a link attached to it and even an icon. The background color can be changed. | +The title above is composed by two VisualizableBase |
+
Inside a Visualizer
you can retrieve the reports of the analyzers and connectors that have been specified inside configuration of the Visualizer itself using .analyzer_reports()
and .connector_reports()
.
+At this point, you can compose these values as you wish wrapping them with the Visualizable
classes mentioned before.
The best way to create a visualizer is to define several methods, one for each Visualizable you want to show in the UI, in your new visualizer, and decorate them with visualizable_error_handler_with_params. This decorator handles exceptions: if there is a bug during the generation of a Visualizable element, an error will be shown instead of that component, and all the other Visualizable elements will still render correctly. Be careful when using it, because it is a function returning a decorator! This means you need to use a syntax like this:
@visualizable_error_handler_with_params(error_name="custom visualizable", error_size=VisualizableSize.S_2)
def custom_visualizable(self):
    ...
instead of the syntax of other decorators that don't need the function call.
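Putting the pieces together, a visualizer typically looks like the following sketch. The import paths and the `VisualizableBase` constructor arguments are assumptions; check an existing visualizer for the authoritative API.

```python
from api_app.visualizers_manager.classes import (  # paths assumed
    VisualizableBase,
    Visualizer,
)


class MyVisualizer(Visualizer):
    @visualizable_error_handler_with_params(error_name="first analyzer result")
    def _first_result(self):
        # wrap a value taken from the analyzers' reports into an element;
        # if this method raises, only this component shows an error
        report = self.analyzer_reports().first()
        return VisualizableBase(value=str(report.report))

    def run(self) -> list:
        # compose the elements into levels and pages (see the table above)
        # and return them in the serialized form expected by the frontend
        ...
```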
+You may want to look at a few existing examples to start to build a new one:
+ +To allow other people to use your configuration, that is now stored in your local database, you have to export it and create a data migration
You can use the django management command `dumpplugin` to automatically create the migration file for your new analyzer (you will find it under `api_app/YOUR_PLUGIN_manager/migrations`). The script will create the following models:

1. PythonModule
2. AnalyzerConfig
3. Parameter
4. PluginConfig

Example: `docker exec -ti intelowl_uwsgi python3 manage.py dumpplugin AnalyzerConfig <new_analyzer_name>`
Add the new analyzer in the lists in the docs: Usage. Also, if the analyzer provides additional optional configuration, add the available options here: Advanced-Usage
+In the Pull Request remember to provide some real world examples (screenshots and raw JSON results) of some successful executions of the analyzer to let us understand how it would work.
+Playbooks_manager/PlaybookConfigs
(* = mandatory, ~ = mandatory on conditions)To allow other people to use your configuration, that is now stored in your local database, you have to export it and create a data migration
You can use the django management command `dumpplugin` to automatically create the migration file for your new playbook (you will find it under `api_app/playbook_manager/migrations`).
Example: `docker exec -ti intelowl_uwsgi python3 manage.py dumpplugin PlaybookConfig <new_analyzer_name>`
If the changes that you have to make should stay local, you can just change the configuration inside the Django admin
page.
But if, instead, you want your changes to be usable by every IntelOwl user, you have to create a new migration.
+To do so, you can use the following snippets as an example:
Create the object, validate it with `.full_clean()`, and then you can save the instance with `.save()`.
def migrate(apps, schema_editor):
    PythonModule = apps.get_model("api_app", "PythonModule")
    Parameter = apps.get_model("api_app", "Parameter")
    PluginConfig = apps.get_model("api_app", "PluginConfig")
    pm = PythonModule.objects.get(module="test.Test", base_path="api_app.connectors_manager.connectors")
    # create the new parameter for this python module
    p = Parameter(name="mynewfield", type="str", description="Test field", is_secret=False, required=True, python_module=pm)
    p.full_clean()
    p.save()
    # give every connector using this module a default value for the new parameter
    for connector in pm.connectorconfigs.all():
        pc = PluginConfig(value="test", connector_config=connector, python_module=pm, for_organization=False, owner=None, parameter=p)
        pc.full_clean()
        pc.save()
def migrate(apps, schema_editor):
    PythonModule = apps.get_model("api_app", "PythonModule")
    Parameter = apps.get_model("api_app", "Parameter")
    pm = PythonModule.objects.get(module="test.Test", base_path="api_app.connectors_manager.connectors")
    # a secret parameter works the same way, with is_secret=True
    p = Parameter(name="mynewsecret", type="str", description="Test field", is_secret=True, required=True, python_module=pm)
    p.full_clean()
    p.save()
def migrate(apps, schema_editor):
    PythonModule = apps.get_model("api_app", "PythonModule")
    Parameter = apps.get_model("api_app", "Parameter")
    pm = PythonModule.objects.get(module="test.Test", base_path="api_app.connectors_manager.connectors")
    # delete an existing parameter of this python module
    Parameter.objects.get(name="myoldfield", python_module=pm).delete()
def migrate(apps, schema_editor):
    PythonModule = apps.get_model("api_app", "PythonModule")
    Parameter = apps.get_model("api_app", "Parameter")
    PluginConfig = apps.get_model("api_app", "PluginConfig")
    pm = PythonModule.objects.get(module="test.Test", base_path="api_app.connectors_manager.connectors")
    p = Parameter.objects.get(name="myfield", python_module=pm)
    # change the default value of an existing parameter
    PluginConfig.objects.filter(parameter=p, python_module=pm, for_organization=False, owner=None).update(value="newvalue")
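These `migrate` functions are then wired into a standard Django migration file; a sketch follows (the dependency below is hypothetical and will differ in your generated migration):

```python
from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ("api_app", "0001_initial"),  # hypothetical: use your real latest migration
    ]

    operations = [
        # run the forward function; the reverse is a no-op
        migrations.RunPython(migrate, migrations.RunPython.noop),
    ]
```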
Since v4, IntelOwl leverages some packages from Certego:
If you need to modify the behavior or add features to those packages, please follow the same rules as for IntelOwl and open a Pull Request there. The same maintainers of IntelOwl will answer you.
+Follow these guides to understand how to start to contribute to them while developing for IntelOwl:
+IntelOwl makes use of the django testing framework and the unittest
library for unit testing of the API endpoints and End-to-End testing of the analyzers and connectors.
In `tests/test_files.zip` (password: "intelowl") there are some files that you can use for testing purposes.
+DISABLE_LOGGING_TEST
-> disable logging to get a clear outputMOCK_CONNECTIONS
-> mock connections to external API to test the analyzers without a real connection or a valid API keyTEST_MD5
TEST_URL
TEST_IP
TEST_DOMAIN
The point here is to launch the code in your environment and not the last official image in Docker Hub.
+For this, use the test
or the ci
option when launching the containers with the ./start
script.
test
option to actually execute tests that simulate a real world environment without mocking connections.ci
option to execute tests in a CI environment where connections are mocked.$ ./start test up
+$ # which corresponds to the command: docker-compose -f docker/default.yml -f docker/test.override.yml up
+
Now that the containers are up, we can launch the test suite.
+Examples:
+ +To test a plugin in real environment, i.e. without mocked data, we suggest that you use the GUI of IntelOwl directly. +Meaning that you have your plugin configured, you have selected a correct observable/file to analyze, +and the final report shown in the GUI of IntelOwl is exactly what you wanted.
+Examples:
+ +All the frontend tests must be run from the folder frontend
.
+The tests can contain log messages, you can suppress then with the environment variable SUPPRESS_JEST_LOG=True
.
npm test -- -t '<describeString> <testString>'
+// example
+npm test -- -t "Login component User login"
+
Please create pull requests only for the branch develop. That code will be pushed to master only on a new release.
+Also remember to pull the most recent changes available in the develop branch before submitting your PR. If your PR has merge conflicts caused by this behavior, it won't be accepted.
+Run pip install -r requirements/test-requirements.txt
to install the requirements to validate your code.
psf/black
to lint the files automatically, then flake8
to check and isort
:(if you installed pre-commit
this is performed automatically at every commit)
$ black . --exclude "migrations|venv"
$ flake8 . --show-source --statistics
$ isort . --profile black --filter-files --skip venv
if flake8 shows any errors, fix them.
$ docker exec -ti intelowl_uwsgi unzip -P intelowl tests/test_files.zip -d test_files
$ docker exec -ti intelowl_uwsgi python manage.py test tests
++Note: IntelOwl has dynamic testing suite. This means that no explicit analyzers/connector tests are required after the addition of a new analyzer or connector.
+
If everything is working, before submitting your pull request, please squash your commits into a single one!
+git rebase -i HEAD~[NUMBER OF COMMITS]
git push --force-with-lease origin
.Squashing commits can be a tricky process but once you figure it out, it's really helpful and keeps our repo concise and clean.
+Keep in mind that, if any errors arise during development, you would need to check the application logs to better understand what is happening so you can easily address the problem.
This is the reason why it is important to add plenty of logs to the application: if they are not available when you need them, troubleshooting becomes very painful.
+Where are IntelOwl logs? +With a default installation of IntelOwl, you would be able to get the application data from the following paths in your OS:
- `/var/lib/docker/volumes/intel_owl_generic_logs/_data/django`: Django Application logs
- `/var/lib/docker/volumes/intel_owl_generic_logs/_data/uwsgi`: Uwsgi application server logs
- `/var/lib/docker/volumes/intel_owl_nginx_logs/_data/`: Nginx Web Server logs
with a custom Bash script and you need to have the following packages installed in your machine:
In some systems you could find pre-installed older versions. Please check this and install a supported version before attempting the installation. Otherwise it would fail.
+Note: We've added a new Bash script initialize.sh
that will check compatibility with your system and attempt to install the required dependencies.
Note
+Warning
+Thestart
script requires a `bash` version > 4 to run.
+
+Note that macOS is shipped with an older version of bash
. Please ensure to upgrade before running the script.
+
+Obviously we strongly suggest reading through all the page to configure IntelOwl in the most appropriate way.
+However, if you feel lazy, you could just install and test IntelOwl with the following steps.
+docker
will be run with sudo
if permissions/roles have not been set.
# clone the IntelOwl project repository
git clone https://github.com/intelowlproject/IntelOwl
cd IntelOwl/

# run helper script to verify installed dependencies and configure basic stuff
./initialize.sh

# start the app
./start prod up
# now the application is running on http://localhost:80

# create a super user
sudo docker exec -ti intelowl_uwsgi python3 manage.py createsuperuser

# now you can login with the created user from http://localhost:80/login

# Have fun!
Warning
The first time you start IntelOwl, a lot of database migrations are applied. This requires some time. If you get 500 status code errors in the GUI, just wait a few minutes and then refresh the page.
These are our recommendations for dedicated deployments of IntelOwl:
+Please remember that every environment has its own peculiarities so these numbers must not be taken as the holy grail.
What should be done is a comprehensive evaluation of the environment where the application will be deployed.
+For more complex environments, a Docker Swarm / Kubernetes cluster is recommended.
+IntelOwl's maintainers are available to offer paid consultancy and mentorship about that.
+IntelOwl is composed of various different technologies, namely:
+All these components are managed via docker compose
.
Open a terminal and execute below commands to construct new environment files from provided templates.
+ +In the docker/env_file_app
, configure different variables as explained below.
REQUIRED variables to run the image:
+DB_HOST
, DB_PORT
, DB_USER
, DB_PASSWORD
: PostgreSQL configuration (The DB credentals should match the ones in the env_file_postgres
). If you like, you can configure the connection to an external PostgreSQL instance in the same variables. Then, to avoid to run PostgreSQL locally, please run IntelOwl with the option --use-external-database
. Otherwise, DB_HOST
must be postgres
to have the app properly communicate with the PostgreSQL container.DJANGO_SECRET
: random 50 chars key, must be unique. If you do not provide one, Intel Owl will automatically set a secret key and use the same for each run. The key is generated by initialize.sh
script.
Strongly recommended variable to set:
+INTELOWL_WEB_CLIENT_DOMAIN
(example: localhost
/mywebsite.com
): the web domain of your instance, this is used for generating links to analysis results.Optional configuration:
+OLD_JOBS_RETENTION_DAYS
: Database retention for analysis results (default: 14 days). Change this if you want to keep your old analysis longer in the database.Configuration required to enable integration with Slack:
+SLACK_TOKEN
: Slack token of your Slack application that will be used to send/receive notificationsDEFAULT_SLACK_CHANNEL
: ID of the Slack channel you want to post the message toConfiguration required to have InteOwl sending Emails (registration requests, mail verification, password reset/change, etc)
+DEFAULT_FROM_EMAIL
: email address used for automated correspondence from the site manager (example: noreply@mydomain.com
)DEFAULT_EMAIL
: email address used for correspondence with users (example: info@mydomain.com
)EMAIL_HOST
: the host to use for sending email with SMTPEMAIL_HOST_USER
: username to use for the SMTP server defined in EMAIL_HOSTEMAIL_HOST_PASSWORD
: password to use for the SMTP server defined in EMAIL_HOST. This setting is used in conjunction with EMAIL_HOST_USER when authenticating to the SMTP server.EMAIL_PORT
: port to use for the SMTP server defined in EMAIL_HOST.EMAIL_USE_TLS
: whether to use an explicit TLS (secure) connection when talking to the SMTP server, generally used on port 587.EMAIL_USE_SSL
: whether to use an implicit TLS (secure) connection when talking to the SMTP server, generally used on port 465.If you use a local PostgreSQL instance (this is the default), in the env_file_postgres
you have to configure different variables as explained below.
Required variables:
+POSTGRES_PASSWORD
(same as DB_PASSWORD
)POSTGRES_USER
(same as DB_USER
)POSTGRES_DB
(default: intel_owl_db
)If you want to have your logs rotated correctly, we suggest you to add the configuration for the system Logrotate.
+To do that you can leverage the initialize.sh
script. Otherwise, if you have skipped that part, you can manually install logrotate by launching the following script:
We decided not to leverage the Django rotation configuration because it caused problematic concurrency issues, leading to logs that were not rotated correctly and to apps that stopped logging. The Logrotate configuration is more stable.
+We added few Crontab configurations that could be installed in the host machine at system level to solve some possible edge-case issues:
+application_restart
has been added for this purpose (it uses the absolute path of start
script in the container). This cron assumes that you have executed IntelOwl with the parameters --all_analyzers
. If you didn't, feel free to change the cron as you wish.This configuration is optional but strongly recommended for people who want to have a production grade deployment. To install it you need to run the following script in each deployed server:
+ +Intel Owl provides basic configuration for:
+configuration/nginx/http.conf
)configuration/intel_owl.ini
)In case you enable HTTPS, remember to set the environment variable HTTPS_ENABLED
as "enabled" to increment the security of the application.
There are 3 options to execute the web server:
+HTTP only (default)
+The project would use the default deployment configuration and HTTP only.
+HTTPS with your own certificate
+The project provides a template file to configure Nginx to serve HTTPS: configuration/nginx/https.conf
.
You should change ssl_certificate
, ssl_certificate_key
and server_name
in that file and put those required files in the specified locations.
Then you should call the ./start
script with the parameter --https
to leverage the right Docker Compose file for HTTPS.
Plus, if you use Flower, you should change in the docker/flower.override.yml
the flower_http.conf
with flower_https.conf
.
HTTPS with Let's Encrypt
+We provide a specific docker-compose file that leverages Traefik to allow fast deployments of public-faced and HTTPS-enabled applications.
+Before using it, you should configure the configuration file docker/traefik.override.yml
by changing the email address and the hostname where the application is served. For a detailed explanation follow the official documentation: Traefik doc.
After the configuration is done, you can add the option --traefik
while executing ./start
Important Info
+IntelOwl depends heavily on docker and docker compose so as to hide this complexity from the enduser the project +leverages a custom shell script (start
) to interface with docker compose
.
+
+You may invoke $ ./start --help
to get help and usage info.
+
+The CLI provides the primitives to correctly build, run or stop the containers for IntelOwl. Therefore,
+
+docker compose
Now that you have completed different configurations, starting the containers is as simple as invoking:
+ +You can add the docker
options -d
to run the application in the background.
Important Info
+Alldocker
and docker compose
specific options must be passed at the end of the script, after a --
token.
+This token indicates the end of IntelOwl's options and the beginning of Docker options.
+
+Example:
+
+
+Hint
+Starting from IntelOwl 4.0.0, with the startup script you can select which version of IntelOwl you want to run (--version
).
+This can be helpful to keep using old versions in case of retrocompatibility issues. The --version
parameter checks out the Git Repository to the Tag of the version that you have chosen. This means that if you checkout to a v3.x.x version, you won't have the --version
parameter anymore so you would need to manually checkout back to the master
branch to use newer versions.
+Warning
+If, for any reason, thestart
script does not work in your environment, we suggest to use plain docker compose
and configuring manually all the optional containers you need.
+
+The basic replacement of ./start prod up
would be:
+
+To stop the application you have to:
+-d
parameter: press CTRL+C
-d
parameter: ./start prod down
This is a destructive operation but can be useful to start again the project from scratch
+./start prod down -- -v
You may want to run docker exec -ti intelowl_uwsgi python3 manage.py createsuperuser
after first run to create a superuser.
+Then you can add other users directly from the Django Admin Interface after having logged with the superuser account.
+To manage users, organizations and their visibility please refer to this section
If you make some code changes and you like to rebuild the project, follow these steps:
+./start test build -- --tag=<your_tag> .
to build the new docker image.docker/test.override.yml
file../start test up -- --build
.To update the project with the most recent available code you have to follow these steps:
$ cd <your_intel_owl_directory> # go into the project directory
$ git pull # pull new changes
$ ./start prod down # kill and destroy the currently running IntelOwl containers
$ ./start prod up # restart the IntelOwl application
Note
+After an upgrade, sometimes a database error in Celery Containers could happen. That could be related to new DB migrations which are not applied by the main Uwsgi Container yet. Do not worry. Wait few seconds for the Uwsgi container to start correctly, then put down the application again and restart it. The problem should be solved. If not, please feel free to open an issue on Github +Note
+After having upgraded IntelOwl, in case the application does not start and you get an error like this: + + + +just run this: + + + +and restart IntelOwl. It should solve the permissions problem. + +Warning
+Major versions of IntelOwl are usually incompatible from one another. +Maintainers strive to keep the upgrade between major version easy but it's not always like that. +Below you can find the additional process required to upgrade from each major versions. +IntelOwl v6 introduced some major changes regarding how the project is started. +Before upgrading, some important things should be checked by the administrator:
+start
script that has the same options as the old Python start.py
script but is more manageable and has decreased the overall project dependencies. The start.py
script has now been removed. Please use the new start
script instead.start
script to run IntelOwl. That would spawn a Redis instance instead of a Rabbit-MQ one locally.Warning
+CARE! We are providing this database migration procedure to help the users to migrate to a new PostgreSQL version. + +Upgrading PostgreSQL is outside the scope of the IntelOwl project so we do not guarantee that everything will work as intended. + +In case of doubt, please check the official PostgreSQL documentation. + +Upgrade at your own risk. + +The database migration procedure is as follows:
+cd docker
docker run -d --name intelowl_postgres_12 -v intel_owl_postgres_data:/var/lib/postgresql/data/ --env-file env_file_postgres library/postgres:12-alpine
docker exec -t intelowl_postgres_12 pg_dump -U <POSTGRES_USER> -d <POSTGRES_DB> --no-owner > /tmp/dump_intelowl.sql
docker container stop intelowl_postgres_12
docker container rm intelowl_postgres_12
docker volume rm intel_owl_postgres_data
<------------- remove old data, this is not exactly necessary because the new postgres has a different volume namedocker run -d --name intelowl_postgres_16 -v intelowl_postgres_data:/var/lib/postgresql/data/ --env-file env_file_postgres library/postgres:16-alpine
cat /tmp/dump_intelowl.sql | docker exec -i intelowl_postgres_16 psql -U <POSTGRES_USER> -d <POSTGRES_DB>
docker container stop intelowl_postgres_16
docker container rm intelowl_postgres_16
IntelOwl v5 introduced some major changes regarding how the plugins and their related configuration are managed in the application. +Before upgrading, some important things should be checked by the administrator:
+analyzer_config.json
which was storing all the base configuration of the Analyzers to a database model (we did the same for all the other plugins types too). This allows us to manage plugins creation/modification/deletion in a more reliable manner and via the Django Admin Interface. If you have created custom plugins and changed those <plugins>_config.json
file manually, you would need to re-create those custom plugins again from the Django Admin Interface. To do that please follow the related new documentationPulsedive_Active_IOC
analyzer. Please substitute it with the new Pulsedive
analyzer.Fortiguard
analyzer because endpoint does not work anymore. No substitute.Rendertron
analyzer not working as intended. No substitute.ThreatMiner
, SecurityTrails
and Robtex
various analyzers and substituted with new versions.Doc_Info_Experimental
. Its functionality (XLM Macro parsing) is moved to Doc_Info
Strings_Info_Classic
. Please use Strings_Info
Strings_Info_ML
. Please use Strings_Info
and set the parameter rank_strings
to True
Yara_Scan_<repo>
analyzers. They all went merged in the single Yara
analyzerDarksearch_Query
analyzer because the service does not exist anymore. No substitute.UnpacMe_EXE_Unpacker
. Please use UnpacMe
BoxJS_Scan_JavaScript
. Please use BoxJS
Anomali_Threatstream_<option>
analyzers. Now we have a single Anomali_Threatstream
analyzer. Use the parameters to select the specific API you need.This is not supported. Please perform a major upgrade once at a time.
+IntelOwl v4 introduced some major changes regarding the permission management, allowing an easier way to manage users and visibility. But that did break the previous available DB. +So, to migrate to the new major version you would need to delete your DB. To do that, you would need to delete your volumes and start the application from scratch.
+ +Please be aware that, while this can be an important effort to manage, the v4 IntelOwl provides an easier way to add, invite and manage users from the application itself. See the Organization section.
+Users upgrading from previous versions need to manually move env_file_app
, env_file_postgres
and env_file_integrations
files under the new docker
directory.
If you are updating to >v1.3.0 from any prior version, you need to execute a helper script so that the old data present in the database doesn't break.
Follow the above update steps; once the docker containers are up and running, execute the following in a new terminal
to get a shell session inside IntelOwl's container.
+Now just copy and paste the below command into this new session,
+ +If you see "Update successful!", everything went fine and now you can enjoy the new features!
IntelOwl was designed with the intent to help the community, in particular those researchers that cannot afford commercial solutions, to generate threat intelligence data in a simple, scalable and reliable way.
+Main features:
+To know more about the project and its growth over time, you may be interested in reading the following official blog posts and/or videos:
Feel free to ask the author anything that comes to your mind about the project:
Matteo Lodi (Twitter).
+We also have a dedicated twitter account for the project: @intel_owl.
+This page includes the most important things to know and understand when using IntelOwl.
Intel Owl's main objective is to provide a single API interface to query in order to retrieve threat intelligence at scale.
+There are multiple ways to interact with the Intel Owl APIs,
+Web Interface
+pyIntelOwl (CLI/SDK)
+goIntelOwl (CLI/SDK)
+Hint: Tokens Creation
The server authentication is managed by API tokens. So, if you want to interact with Intel Owl, you have two ways to do that:

Plugins are the core modular components of IntelOwl that can be easily added, changed and customized.
There are several types of plugins:
+ +Analyzers are the most important plugins in IntelOwl. They allow to perform data extraction on the observables and/or files that you would like to analyze.
+The following is the list of the available analyzers you can run out-of-the-box. You can also navigate the same list via the
+$ pyintelowl get-analyzer-config
APKiD
: APKiD identifies many compilers, packers, obfuscators, and other weird stuff from an APK or DEX file.Androguard
: Androguard is a python tool which can be leveraged to get useful information from the APK, for example, permissions, activities, services, 3rd party permissions, etc.BoxJS_Scan_Javascript
: Box-JS is a tool for studying JavaScript malware.Capa_Info
: Capa detects capabilities in executable filesCapa_Info_Shellcode
: Capa detects capabilities in shellcodeClamAV
: scan a file via the ClamAV AntiVirus Engine. IntelOwl automatically keeps ClamAV updated with official and unofficial open source signaturesDoc_Info
: static document analysis with new features to analyze XLM macros, encrypted macros and more (combination of Oletools and XLMMacroDeobfuscator)ELF_Info
: static ELF analysis with pyelftools and telfhashFile_Info
: static generic File analysis (hashes, magic and exiftool)Floss
: Mandiant Floss Obfuscated String Solver in filesHfinger
: create fingerprints of malware HTTPS requests using HfingerPE_Info
: static PE analysis with pefilePEframe_Scan
: Perform static analysis on Portable Executable malware and malicious MS Office documents with PeFramePermhash
: create hash of manifest permissions found in APK, Android manifest, Chrome extensions or Chrome extension manifest using PermhashPDF_Info
: static PDF analysis (peepdf + pdfid)Qiling_Linux
: Qiling Linux binary emulation.Qiling_Linux_Shellcode
: Qiling Linux shellcode emulation.Qiling_Windows
: Qiling Windows binary emulation.Qiling_Windows_Shellcode
: Qiling Windows shellcode emulation.Quark_Engine
: Quark Engine is an Obfuscation-Neglect Android Malware Scoring System.Rtf_Info
: static RTF analysis (Oletools)Signature_Info
: PE signature extractor with osslsigncodeSpeakeasy
: Mandiant Speakeasy binary emulationSpeakEasy_Shellcode
: Mandiant Speakeasy shellcode emulationStrings_Info
: Strings extraction. Leverages Mandiant's StringsifterSuricata
: Analyze PCAPs with open IDS signatures with Suricata engineThug_HTML_Info
: Perform hybrid dynamic/static analysis on an HTML file using the Thug low-interaction honeyclientXlm_Macro_Deobfuscator
: XlmMacroDeobfuscator deobfuscate xlm macrosYara
: scan a file with Yara rules.Zippy_scan
: Zippy: Fast method to classify text as AI or human-generated; takes in lzma
,zlib
,brotli
as input based engines; ensemble
being default.Blint
: Blint is a Binary Linter that checks the security properties and capabilities of your executables. Supported binary formats: - Android (apk, aab) - ELF (GNU, musl) - PE (exe, dll) - Mach-O (x64, arm64)Mobsf
: MobSF is a static analysis tool that can find insecure code patterns in your Android and iOS source code. Supports Java, Kotlin, Android XML, Swift and Objective C Code.DroidLysis
: DroidLysis is a pre-analysis tool for Android apps: it performs repetitive and boring tasks we'd typically do at the beginning of any reverse engineering. It disassembles the Android sample, organizes output in directories, and searches for suspicious spots in the code to look at. The output helps the reverse engineer speed up the first few steps of analysis.Artifacts
: Artifacts is a tool that does APK strings analysis. Useful for first analysis.CapeSandbox
: CAPESandbox automatically scans suspicious files using the CapeSandbox API. Analyzer works for private instances as well.Cymru_Hash_Registry_Get_File
: Check if a particular file is known to be malware by Team CymruCuckoo_Scan
: scan a file on Cuckoo (this analyzer is disabled by default. You have to change that flag in the config to use it)DocGuard_Upload_File
: Analyze office files in seconds. DocGuard.Dragonfly_Emulation
: Emulate malware against Dragonfly sandbox by Certego S.R.L.FileScan_Upload_File
: Upload your file to extract IoCs from executable files, documents and scripts via FileScan.io API.HashLookupServer_Get_File
: check if an md5 or sha1 is available in the database of known files hosted by CIRCLHybridAnalysis_Get_File
: check file hash on HybridAnalysis sandbox reportsIntezer_Scan
: scan a file on Intezer. Register for a free community account here. With TLP CLEAR
, in case the hash is not found, you would send the file to the service.Malpedia_Scan
: scan a binary or a zip file (pwd:infected) against all the yara rules available in MalpediaMalwareBazaar_Get_File
: Check if a particular malware sample is known to MalwareBazaarMISPFIRST_Check_Hash
: check a file hash on the FIRST MISP instanceMISP_Check_Hash
: check a file hash on a MISP instanceMWDB_Scan
: mwdblib Retrieve malware file analysis from repository maintained by CERT Polska MWDB. With TLP CLEAR
, in case the hash is not found, you would send the file to the service.OTX_Check_Hash
: check file hash on Alienvault OTXSublimeSecurity
: Analyze an Email with Sublime Security live flowTriage_Scan
: leverage Triage sandbox environment to scan various filesUnpacMe
: UnpacMe is an automated malware unpacking serviceVirushee_Scan
: Check file hash on Virushee API. With TLP CLEAR
, in case the hash is not found, you would send the file to the service.VirusTotal_v3_File
: check the file hash on VirusTotal. With TLP CLEAR
, in case the hash is not found, you would send the file to the service.YARAify_File_Scan
: scan a file against public and non-public YARA and ClamAV signatures in YARAify public serviceYARAify_File_Search
: scan a hash against the YARAify databaseCheckDMARC
: An SPF and DMARC DNS records validator for domains.DNStwist
: Scan a url/domain to find potentially malicious permutations via dns fuzzing. dnstwist repoThug_URL_Info
: Perform hybrid dynamic/static analysis on a URL using Thug low-interaction honeyclientAILTypoSquatting
: AILTypoSquatting is a Python library to generate a list of potential typosquatting domains with a domain name permutation engine to feed AIL and other systems.AbuseIPDB
: check if an ip was reported on AbuseIPDBAbusix
: get abuse contacts of an IP address from AbusixBGP Ranking
: BGP-Ranking provides a way to collect such malicious activities, aggregate the information per ASN and provide a ranking model to rank the ASN from the most malicious to the less malicious ASN.Anomali_Threatstream_PassiveDNS
: Return information from passive dns of Anomali. On Anomali Threatstream PassiveDNS Api.Auth0
: scan an IP against the Auth0 APIBinaryEdge
: Details about an Host. List of recent events for the specified host, including details of exposed ports and services using IP query and return list of subdomains known from the target domains using domain queryBitcoinAbuse
: Check a BTC address against bitcoinabuse.com, a public database of BTC addresses used by hackers and criminals.Censys_Search
: scan an IP address against Censys View APICheckPhish
: CheckPhish can detect phishing and fraudulent sites.CIRCLPassiveDNS
: scan an observable against the CIRCL Passive DNS DBCIRCLPassiveSSL
: scan an observable against the CIRCL Passive SSL DBClassic_DNS
: Retrieve current domain resolution with default DNSCloudFlare_DNS
: Retrieve current domain resolution with CloudFlare DoH (DNS over HTTPS)CloudFlare_Malicious_Detector
: Leverages CloudFlare DoH to check if a domain is related to malwareCrowdsec
: check if an IP was reported on Crowdsec Smoke DatasetCymru_Hash_Registry_Get_Observable
: Check if a particular hash is available in the malware hash registry of Team CymruDNSDB
: scan an observable against the Passive DNS Farsight Database (support both v1 and v2 versions)DNS0_EU
: Retrieve current domain resolution with DNS0.eu DoH (DNS over HTTPS)DNS0_EU_Malicious_Detector
: Check if a domain or an url is marked as malicious in DNS0.eu database (Zero service)DocGuard_Get
: check if a hash was analyzed on DocGuard. DocGuardDShield
: Service Provided by DShield to get useful information about IP addressesFeodo_Tracker
: Feodo Tracker offers various blocklists, helping network owners to protect their users from Dridex and Emotet/Heodo.FileScan_Search
: Finds reports and uploaded files by various tokens, like hash, filename, verdict, IOCs etc via FileScan.io API.FireHol_IPList
: check if an IP is in FireHol's IPListGoogleSafebrowsing
: Scan an observable against GoogleSafeBrowsing DBGoogleWebRisk
: Scan an observable against WebRisk API (Commercial version of Google Safe Browsing). Check the docs to enable this properlyGoogle_DNS
: Retrieve current domain resolution with Google DoH (DNS over HTTPS)GreedyBear
: scan an IP or a domain against the GreedyBear API (requires API key)GreyNoise
: scan an IP against the Greynoise API (requires API key)GreyNoiseCommunity
: scan an IP against the Community Greynoise API (requires API key)Greynoise_Labs
: scan an IP against the Greynoise API (requires authentication token which can be obtained from cookies on Greynoise website after launching the playground from here)HashLookupServer_Get_Observable
: check if an md5 or sha1 is available in the database of known files hosted by CIRCLHoneyDB_Get
: HoneyDB IP lookup serviceHoneyDB_Scan_Twitter
: scan an IP against HoneyDB.io's Twitter Threat FeedHunter_How
: Scans IP and domain against Hunter_How API.Hunter_Io
: Scans a domain name and returns set of data about the organisation, the email address found and additional information about the people owning those email addresses.HybridAnalysis_Get_Observable
: search an observable in the HybridAnalysis sandbox reportsIP2WHOIS
: API Docs IP2Location.io IP2WHOIS Domain WHOIS API helps users to obtain domain information and WHOIS record by using a domain name.IPQS_Fraud_And_Risk_Scoring
: Scan an Observable against IPQualityscoreInQuest_DFI
: Deep File Inspection by InQuest LabsInQuest_IOCdb
: Indicators of Compromise Database by InQuest LabsInQuest_REPdb
: Search in InQuest Lab's Reputation DatabaseIPApi
: Get information about IPs using batch-endpoint and DNS using DNS-endpoint.IPInfo
: Location Information about an IPIp2location
: API Docs IP2Location.io allows users to check IP address location in real time. (Supports both with or without key)Intezer_Get
: check if an analysis related to a hash is available in Intezer. Register for a free community account here.Koodous
: koodous API get information about android malware.MalwareBazaar_Get_Observable
: Check if a particular malware hash is known to MalwareBazaarMalwareBazaar_Google_Observable
: Check if a particular IP, domain or url is known to MalwareBazaar using google searchMaxMindGeoIP
: extract GeoIP info for an observableMISP
: scan an observable on a MISP instanceMISPFIRST
: scan an observable on the FIRST MISP instanceMmdb_server
: Mmdb_server mmdb-server is an open source fast API server to lookup IP addresses for their geographic location, AS number.Mnemonic_PassiveDNS
: Look up a domain or IP using the Mnemonic PassiveDNS public API.MWDB_Get
: mwdblib Retrieve malware file analysis by hash from repository maintained by CERT Polska MWDB.Netlas
: search an IP against NetlasNERD_analyzer
: scan an IP address against NERD databaseONYPHE
: search an observable in ONYPHEOpenCTI
: scan an observable on an OpenCTI instanceOTXQuery
: scan an observable on Alienvault OTXPhishstats
: Search PhishStats API to determine if an IP/URL/domain is malicious.Phishtank
: Search an url against Phishtank APIPhishingArmy
: Search an observable in the PhishingArmy blocklistPulsedive
: Scan indicators and retrieve results from Pulsedive's API.Quad9_DNS
: Retrieve current domain resolution with Quad9 DoH (DNS over HTTPS)Quad9_Malicious_Detector
: Leverages Quad9 DoH to check if a domain is related to malwareRobtex
: scan a domain/IP against the Robtex Passive DNS DBSecuritytrails
: scan an IP/Domain against Securitytrails APIShodan_Honeyscore
: scan an IP against Shodan Honeyscore APIShodan_Search
: scan an IP against Shodan Search APISpyse
: Scan domains, IPs, emails and CVEs using Spyse's API. Register here.SSAPINet
: get a screenshot of a web page using screenshotapi.net (external source); additional config options can be added to extra_api_params
in the config.Stalkphish
: Search Stalkphish API to retrieve information about a potential phishing site (IP/URL/domain/Generic).Stratosphere_Blacklist
: Cross-reference an IP from blacklists maintained by Stratosphere LabsTalosReputation
: check an IP reputation from TalosThreatFox
: search for an IOC in ThreatFox's databaseThreatminer
: retrieve data from Threatminer APITorNodesDanMeUk
: check if an IP is a Tor Node using a list of all Tor nodes provided by dan.me.ukTorProject
: check if an IP is a Tor Exit NodeTriage_Search
: Search for reports of observables or upload from URL on triage cloudTranco
: Check if a domain is in the latest Tranco ranking top sites listURLhaus
: Query a domain or URL against URLhaus API.UrlScan_Search
: Search an IP/domain/url/hash against URLScan APIUrlScan_Submit_Result
: Submit & retrieve result of an URL against URLScan APIVirushee_CheckHash
: Search for a previous analysis of a file by its hash (SHA256/SHA1/MD5) on Virushee API.VirusTotal_v3_Get_Observable
: search an observable in the VirusTotal DBWhoisxmlapi
: Fetch WHOIS record data, of a domain name, an IP address, or an email address.WhoIs_RipeDB_Search
: Fetch whois record data of an IP address from Ripe DB using their search API (no API key required)XForceExchange
: scan an observable on IBM X-Force ExchangeYARAify_Search
: lookup a file hash in Abuse.ch YARAifyYETI
(Your Everyday Threat Intelligence): scan an observable on a YETI instance.Zoomeye
: Zoomeye Cyberspace Search Engine, recording information of devices, websites, services, components etc.Validin
:Validin investigates historic and current data describing the structure and composition of the internet.TweetFeed
: TweetFeed collects Indicators of Compromise (IOCs) shared by the infosec community on Twitter. Here you will find malicious URLs, domains, IPs, and SHA256/MD5 hashes.HudsonRock
: Hudson Rock provides its clients the ability to query a database of over 27,541,128 computers which were compromised through global info-stealer campaigns performed by threat actors.CyCat
: CyCat or the CYbersecurity Resource CATalogue aims at mapping and documenting, in a single formalism and catalogue available cybersecurity tools, rules, playbooks, processes and controls.Vulners
: Vulners is the most complete and the only fully correlated security intelligence database, which goes through constant updates and links 200+ data sources in a unified machine-readable format. It contains 8 mln+ entries, including CVEs, advisories, exploits, and IoCs — everything you need to stay abreast on the latest security threats.Some analyzers require details other than just IP, URL, Domain, etc. We classified them as generic
Analyzers. Since the type of field is not known, there is a format for strings to be followed.
CyberChef
: Run a query on a CyberChef server using pre-defined or custom recipes.Anomali_Threatstream_Confidence
: Give max, average and minimum confidence of maliciousness for an observable. On Anomali Threatstream Confidence API.Anomali_Threatstream_Intelligence
: Search for threat intelligence information about an observable. On Anomali Threatstream Intelligence API.CRXcavator
: scans a chrome extension against crxcavator.ioDehashed_Search
: Query any observable/keyword against https://dehashed.com's search API.EmailRep
: search an email address on emailrep.ioHaveIBeenPwned
: HaveIBeenPwned checks if an email address has been involved in a data breachIntelX_Intelligent_Search
: IntelligenceX is a search engine and data archive. Fetches emails, urls, domains associated with an observable or a generic string.IntelX_Phonebook
: IntelligenceX is a search engine and data archive. Fetches emails, urls, domains associated with an observable or a generic string.IPQS_Fraud_And_Risk_Scoring
: Scan an Observable against IPQualityscoreMISP
: scan an observable on a MISP instanceVirusTotal_v3_Intelligence_Search
: Perform advanced queries with VirusTotal Intelligence (requires paid plan)WiGLE
: Maps and database of 802.11 wireless networks, with statistics, submitted by wardrivers, netstumblers, and net huggers.YARAify_Generics
: lookup a YARA rule (default), ClamAV rule, imphash, TLSH, telfhash or icon_dash in YARAifyPhoneInfoga
: PhoneInfoga is one of the most advanced tools to scan international phone numbers.HudsonRock
: Hudson Rock provides its clients the ability to query a database of over 27,541,128 computers which were compromised through global info-stealer campaigns performed by threat actors.Some analyzers are optional and need to be enabled explicitly.
+Connectors are designed to run after every successful analysis which makes them suitable for automated threat-sharing. They support integration with other SIEM/SOAR projects, specifically aimed at Threat Sharing Platforms.
+The following is the list of the available connectors. You can also navigate the same list via the
+$ pyintelowl get-connector-config
MISP
: automatically creates an event on your MISP instance, linking the successful analysis on IntelOwl.OpenCTI
: automatically creates an observable and a linked report on your OpenCTI instance, linking the successful analysis on IntelOwl.YETI
: YETI = Your Everyday Threat Intelligence. Find or create an observable on YETI, linking the successful analysis on IntelOwl.Slack
: Send the analysis link to a Slack channel (useful for external notifications)EmailSender
: Send a generic email.AbuseSubmitter
: Send an email to request to take down a malicious domain.With IntelOwl v5.2.0 we introduced the Pivot
Plugin.
Pivots are designed to create a job from another job. This plugin allows the user to set certain conditions that trigger the execution of one or more subsequent jobs, strictly connected to the first one.
This is a "SOAR" feature that allows the users to connect multiple analyses together.
+TakedownRequestToAbuseIp
: This Plugin leverages results from DNS resolver analyzers to extract a valid IP address to pivot to the Abusix analyzer.AbuseIpToSubmission
: This Plugin leverages results from the Abusix analyzer to extract the abuse contacts of an IP address to pivot to the AbuseSubmitter connector.You can build your own custom Pivot with your custom logic with just few lines of code. See the Contribute section for more info.
+From the GUI, the users can pivot in two ways:
+In both cases, the user is redirected to the Scan Page that is precompiled with the observable selected. Then the user would be able to select the Playbook to execute in the new job. +
+After the new Job is started, a new Investigation will be created (if it does not already exist) and both the jobs will be added to the same Investigation.
+In the following image you can find an example of an Investigation composed by 3 pivots generated manually:
+test\.com
Jobtest\.com
analysis had been created with a different Playbook.With IntelOwl v5 we introduced a new plugin type called Visualizers. +You can leverage it as a framework to create custom aggregated and simplified visualization of analyzer results.
+Visualizers are designed to run after the analyzers and the connectors. +The visualizer adds logic after the computations, allowing to show the final result in a different way than merely the list of reports.
+Visualizers can be executed only during Scans
through the playbook that has been configured on the visualizer itself.
This framework is extremely powerful and allows every user to customize the GUI as they wish. But you know...with great power comes great responsability. To fully leverage this framework, you would need to put some effort in place. You would need to understand which data is useful for you and then write few code lines that would create your own GUI. +To simplify the process, take example from the pre-built visualizers listed below and follow the dedicated documentation.
+DNS
: displays the aggregation of every DNS analyzer reportYara
: displays the aggregation of every matched rule by the Yara
AnalyzerDomain_Reputation
: Visualizer for the Playbook "Popular_URL_Reputation_Services"IP_Reputation
: Visualizer for the Playbook "Popular_IP_Reputation_Services"Pivot
: Visualizer that can be used in a Playbook to show the Pivot execution result. See Pivots for more info.With IntelOwl v5.1.0 we introduced the Ingestor
Plugin.
Ingestors allow you to automatically insert IOC streams from outside sources into IntelOwl itself.
+Each Ingestor must have a Playbook
attached: this will allow the creation of a Job
from every IOC retrieved.
Ingestors are system-wide and disabled by default, meaning that only the administrator are able to configure them and enable them. +Ingestors can be spammy so be careful about enabling them.
+A very powerful use is case is to combine Ingestors with Connectors to automatically extract data from external sources, analyze them with IntelOwl and push them externally to another platform (like MISP or a SIEM)
+ThreatFox
: Retrieves daily ioc from https://threatfox.abuse.ch/
and analyzes them.MalwareBazaar
: Retrieves hourly samples from https://bazaar.abuse.ch/
and analyzes them.VirusTotal
: Perform intelligence queries at hourly intervals from https://www.virustotal.com/
(premium API key required), then retrieves the samples and analyzes them.Playbooks are designed to be easy-to-share sequences of running Plugins (Analyzers, Connectors, ...) on a particular kind of observable.
+If you want to avoid to re-select/re-configure a particular combination of analyzers and connectors together every time, you should create a playbook out of it and use it instead. This is time saver.
+This is a feature introduced since IntelOwl v4.1.0! Please provide feedback about it!
+The following is the list of the available pre-built playbooks. You can also navigate the same list via the
+$ pyintelowl get-playbook-config
FREE_TO_USE_ANALYZERS
: A playbook containing all free to use analyzers.Sample_Static_Analysis
: A playbook containing all analyzers that perform static analysis on files.Popular_URL_Reputation_Services
: Collection of the most popular and free reputation analyzers for URLs and DomainsPopular_IP_Reputation_Services
: Collection of the most popular and free reputation analyzers for IP addressesDns
: A playbook containing all dns providersTakedown_Request
: Start investigation to request to take down a malicious domain. A mail will be sent to the domain's abuse contacts foundAbuse_IP
: Playbook containing the Abusix analyzer. It is executed after the Takedown_Request playbookSend_Abuse_Email
: Playbook containing the AbuseSubmitter connector to send an email to request to take down a malicious domain. It is executed after the Abuse_IP playbookYou can create new playbooks in different ways, based on the users you want to share them with:
If you want to share them with every user in IntelOwl, create them via the Django Admin interface at /admin/playbooks_manager/playbookconfig/
.
If you want to share them with yourself or your organization only, you need to leverage the "Save as Playbook" button that you can find on the top right of the Job Result Page.
In this way, after you have done an analysis, you can save the configuration of the Plugins you executed for re-use with a single click.
+ +The created Playbook would be available to yourself only. If you want either to share it with your organization or to delete it, you need to go to the "Plugins" section and enable it manually by clicking the dedicated button.
+ +If you want to create completely new Plugins (not based on already existing python modules), please refer to the Contribute section. This is usually the case when you want to integrate IntelOwl with either a new tool or a new service.
+On the contrary, if you would like to just customize the already existing plugins, this is the place.
+If you are an IntelOwl superuser, you can create, modify, delete analyzers based on already existing modules by changing the configuration values inside the Django Admin interface at:
+/admin/analyzers_manager/analyzerconfig/
./admin/connectors_manager/connectorconfig/
.The following are the most important fields that you can change without touching the source code:
+Name
: Name of the analyzerDescription
: Description of the analyzerDisabled
: you can choose to disable certain analyzers, then they won't appear in the dropdown list and won't run if requested.Python Module
: Python path of the class that will be executed. This should not be changed most of the time.Maximum TLP
: see TLP SupportSoft Time Limit
: this is the maximum time (in seconds) of execution for an analyzer. Once reached, the task will be killed (or managed in the code by a custom Exception). Default 300
.Routing Key
: this takes effect only when multi-queue is enabled. Choose which celery worker would execute the task: local
(ideal for tasks that leverage local applications like Yara), long
(ideal for long tasks) or default
(ideal for simple webAPI-based analyzers).For analyzers only:
+Supported Filetypes
: can be populated as a list. If set, and you ask to analyze a file with a different mimetype from the ones you specified, it won't be executedNot Supported Filetypes
: can be populated as a list. If set, and you ask to analyze a file with a mimetype among the ones you specified, it won't be executedObservable Supported
: can be populated as a list. If set, and you ask to analyze an observable that is not in this list, it won't be executed. Valid values are: ip
, domain
, url
, hash
, generic
.For connectors only:
+Run on Failure
(default: true
): if they can be run even if the job has status reported_with_fails
For visualizers only:
+Playbooks
: list of playbooks that trigger the specified visualizer execution.Sometimes, it may happen that you would like to create a new analyzer very similar to an already existing one. Maybe you would like to just change the description and the default parameters.
+A helpful way to do that without having to copy/pasting the entire configuration, is to click on the analyzer that you want to copy, make the desired changes, and click the save as new
button.
Warning
+Changing other keys can break a plugin. In that case, you should think about duplicating the configuration entry or python module with your changes. +Other options can be added at the "Python module" level and not at the Plugin level. To do that, go to: admin/api_app/pythonmodule/
and select the Python module used by the Plugin that you want to change.
+For example, the analyzer AbuseIPDB
uses the Python module abuseipdb.AbuseIPDB
.
Once there, you'll get this screen:
+ +There you can change the following values:
+Update Schedule
: if the analyzer requires some sort of update (local database, local rules, ...), you can specify the crontab schedule to update them.Health Check Schedule
: if the analyzer has implemented a Health Check, you can specify the crontab schedule to check whether the service works or not.Each Plugin could have one or more parameters available to be configured. These parameters allow the users to customize the Plugin behavior.
+There are 2 types of Parameters:
+Secrets
: these parameters usually manage sensitive data, like API keys.To see the list of these parameters:
+admin/api_app/parameter/
/admin/analyzers_manager/analyzerconfig/
You can change the Plugin Parameters at 5 different levels:
+Playbook Exception
+Please remember that, if you are executing a Playbook, the "Runtime configuration" of the Playbook take precedence over the Plugin Configuration. +Plugin Configuration Order
Due to the multiple chances that are given to customize the parameters of the Plugins that are executed, it may be easy to confuse the order and launch Plugins without awareness of what you are doing.

This is the order that defines which values are used for the parameters, starting from the most important element:

- Runtime Configuration at Time of Request.
- Runtime Configuration of the Playbook (if a Playbook is used and the Runtime Configuration at Time of Request is empty)
- Plugin Configuration of the User
- Plugin Configuration of the Organization
- Default Plugin Configuration of the Parameter

If you are using the GUI, please remember that you can always check the Parameters before starting a "Scan" by clicking the "Runtime configuration" ![img.png](./static/runtime_config.png) button.

Example:
![img.png](./static/runtime_config_2.png)
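To make the parameter precedence above concrete, here is a minimal sketch using the pyintelowl client (shown later in this document), assuming its send_observable_analysis_request method accepts a runtime_configuration mapping (check the pyintelowl docs); the overridden analyzer parameter below (query_type) is only a hypothetical example:

from pyintelowl import IntelOwl

client = IntelOwl("<your_api_key>", "<your_intelowl_instance_url>")

# "Runtime Configuration at Time of Request": per the order above, these
# values win over user, organization and default parameter values.
runtime_configuration = {
    "Classic_DNS": {"query_type": "A"},  # hypothetical parameter override
}

response = client.send_observable_analysis_request(
    observable_name="example.com",
    analyzers_requested=["Classic_DNS"],
    runtime_configuration=runtime_configuration,
)
print(response)  # the API answer for the newly created job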
+Considering the impact that this change could have in the application, the GUI does not allow a normal user to enable/disable any plugin. On the contrary, users with specific privileges may change this configuration:
+All plugins, i.e. analyzers and connectors, have kill
and retry
actions. In addition to that, all docker-based analyzers and connectors have a healthcheck
action to check if their associated instances are up or not.
kill:
+Stop a plugin whose status is running
/pending
:
IntelOwl.kill_analyzer
and IntelOwl.kill_connector
function.$ pyintelowl jobs kill-analyzer <job_id> <analyzer_name>
and $ pyintelowl jobs kill-connector <job_id> <connector_name>
PATCH /api/job/{job_id}/{plugin_type/{plugin_name}/kill
and PATCH /api/job/{job_id}/connector/{connector_name}/kill
retry:
+Retry a plugin whose status is failed
/killed
:
IntelOwl.retry_analyzer
and IntelOwl.retry_connector
function,$ pyintelowl jobs retry-analyzer <job_id> <analyzer_name>
and $ pyintelowl jobs retry-connector <job_id> <connector_name>
PATCH /api/job/{job_id}/{plugin_type}/{plugin_name}/retry
healthcheck:
+Check if a plugin is able to connect to its provider:
+IntelOwl.analyzer_healthcheck
and IntelOwl.connector_healthcheck
methods.$ pyintelowl analyzer-healthcheck <analyzer_name>
and $ pyintelowl connector-healthcheck <connector_name>
GET /api/{plugin_type}/{plugin_name}/healthcheck
pull:
+Update a plugin with the newest rules/database:
+POST /api/{plugin_type}/{plugin_name}/pull
The Traffic Light Protocol (TLP) is a standard that was created to facilitate greater sharing of potentially sensitive information and more effective collaboration.
+IntelOwl is not a threat intel sharing platform, like the MISP platform. However, IntelOwl is able to share analysis results to external platforms (via Connectors) and to send possible privacy related information to external services (via Analyzers).
+This is why IntelOwl does support a customized version of the Traffic Light Protocol (TLP): to allow the user to have a better knowledge of how their data are being shared.
+Every Analyzer and Connector can be configured with a maximum_tlp
value.
+Based on that value, IntelOwl understands if the specific plugin is allowed or not to run (e.g. if maximum_tlp
is GREEN
, it would run for analysis with TLPs WHITE
and GREEN
only)
This is how every available TLP value behaves once selected for an analysis execution:
+CLEAR
: no restriction (WHITE
was replaced by CLEAR
in TLP v2.0, but WHITE
is supported for backward compatibility)GREEN
: disable analyzers that could impact privacyAMBER
(default): disable analyzers that could impact privacy and limit view permissions to my groupRED
: disable analyzers that could impact privacy, limit view permissions to my group and do not use any external serviceA plugin can be run when all of the following requirements have been satisfied:
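A minimal sketch of this gating logic (illustrative only, not IntelOwl's actual implementation), encoding the customized TLP ordering described above:

# illustrative ordering: CLEAR/WHITE < GREEN < AMBER < RED
TLP_ORDER = {"CLEAR": 0, "WHITE": 0, "GREEN": 1, "AMBER": 2, "RED": 3}

def plugin_can_run(maximum_tlp: str, analysis_tlp: str) -> bool:
    # a plugin runs only if the analysis TLP does not exceed its maximum_tlp
    return TLP_ORDER[analysis_tlp] <= TLP_ORDER[maximum_tlp]

# e.g. a plugin with maximum_tlp GREEN runs for WHITE/CLEAR and GREEN analyses only
assert plugin_can_run("GREEN", "WHITE")
assert not plugin_can_run("GREEN", "AMBER")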
+Investigations are a new framework introduced in IntelOwl v6 with the goal to allow the users to connect the analysis they do with each other.
+In this way the analysts can use IntelOwl as the starting point of their "Investigations", register their findings, correlate the information found, and collaborate...all in a single place.
+Things to know about the framework:
+Investigations are created in 2 ways:
+If you want to add a job to an Investigation, you should click to the root block of the Investigation (see following image):
+ +Once a job has been added, you'll have something like this:
+ +If you want to remove a Job, you can click on the Job block and click "Remove branch". On the contrary, if you just want to see Job Results, you can click in the "Link" button. (check next image)
+ +A good client is a client that is easy to use, configurable and customizable to a user’s liking. Hence, the client has 4 great features:
+1. Configurable HTTP client
+2. Customizable timeouts
+3. Logger
+4. Easy ways to create the IntelOwlClient
Now from the documentation, you can see you can pass your http.Client
. This is to facilitate each user’s requirement and taste! If you don’t pass one (nil
) a default http.Client
will be made for you!
From IntelOwlClientOptions
you can add your own timeout to your requests as well.
To ease developers' work go-intelowl provides a logger for easy debugging and tracking! For the logger we used logrus because of 2 reasons: +1. Easy to use +2. Extensible to your liking
+IntelOwlClient
As you know working with Golang structs is sometimes cumbersome we thought we could provide a simple way to create the client in a way that helps speed up development. This gave birth to the idea of using a JSON
file to create the IntelOwlClient. The method NewIntelOwlClientThroughJsonFile
does exactly that. Send the IntelOwlClientOptions
JSON file path with your http.Client and LoggerParams in this method and you'll get the IntelOwlClient!
For the sake of simplicity, we decided that for some endpoints we’ll be passing Option Parameters
this is to facilitate easy access, configuration and automation so that you don’t need to pass in many parameters but just a simple struct that can be easily converted to and from JSON!
For example, let us look at the TagParams
we use it as an argument for a method Create
for TagService
. From a glance, the TagParams
look simple. They hold 2 fields: Label
, and Color
which can be passed seperatly to the method but imagine if you have many fields! (if you don’t believe see the ObservableAnalysisParams
)
For a practical implementation you can see the example
++go-intelowl is a client library/SDK that allows developers to easily automate and integrate IntelOwl with their own set of tools!
+ +Use go get to retrieve the SDK to add it to your GOPATH workspace, or project's Go module dependencies.
+ +This library was built with ease of use in mind! Here are some quick examples to get you started. If you need more example you can go to the examples directory
+To start using the go-intelowl library you first need to import it: +
+Construct a newIntelOwlClient
, then use the various services to easily access different parts of Intelowl's REST API. Here's an example of getting all jobs:
+clientOptions := gointelowl.IntelOwlClientOptions{
+ Url: "your-cool-URL-goes-here",
+ Token: "your-super-secret-token-goes-here",
+ // This is optional
+ Certificate: "your-optional-certificate-goes-here",
+}
+
+intelowl := gointelowl.NewIntelOwlClient(
+ &clientOptions,
+ nil
+	nil,
+
+ctx := context.Background()
+
+// returns *[]Jobs or an IntelOwlError!
+jobs, err := intelowl.JobService.List(ctx)
+
options
structs. Where we can customize the client API or service endpoint to our liking! For more information go here. Here's a quick example!
+// ...Making the client and context!
+
+tagOptions := gointelowl.TagParams{
+ Label: "NEW TAG",
+ Color: "#ffb703",
+}
+
+createdTag, err := intelowl.TagService.Create(ctx, tagOptions)
+if err != nil {
+ fmt.Println(err)
+} else {
+ fmt.Println(createdTag)
+}
+
The examples directory contains a couple for clear examples, of which one is partially listed here as well:
+package main
+
+import (
+	"context"
+	"fmt"
+
+ "github.com/intelowlproject/go-intelowl/gointelowl"
+)
+
+func main(){
+ intelowlOptions := gointelowl.IntelOwlClientOptions{
+ Url: "your-cool-url-goes-here",
+ Token: "your-super-secret-token-goes-here",
+ Certificate: "your-optional-certificate-goes-here",
+ }
+
+ client := gointelowl.NewIntelOwlClient(
+ &intelowlOptions,
+ nil,
+ )
+
+ ctx := context.Background()
+
+ // Get User details!
+ user, err := client.UserService.Access(ctx)
+ if err != nil {
+ fmt.Println("err")
+ fmt.Println(err)
+ } else {
+ fmt.Println("USER Details")
+ fmt.Println(*user)
+ }
+}
+
If you want to follow the updates, discuss, contribute, or just chat then please join our slack channel we'd love to hear your feedback!
+Licensed under the GNU AFFERO GENERAL PUBLIC LICENSE.
+You need a valid API key to interact with the IntelOwl server.
+You can get an API by doing the following:
+1. Log / Signin into intelowl
+2. At the upper right click on your profile from the drop down select API Access/ Sessions
+3. Then generate an API key or see it!
Keys should be created from the admin interface of IntelOwl: you have to go in the Durin section (click on Auth tokens
) and generate a key there.
The unit tests were written as a combination of table driven tests and the approach used by go-github
+Firstly we use a TestData
struct that has the following fields:
+1. Input
- this is an interface
as it is to be used as the input required for an endpoint
+2. Data
- this is a string
as it'll be the JSON
string that the endpoint is expected to return
+2. StatusCode
- this is an int
as it is meant to be used as the expected response returned by the endpoint
+3. Want
- the expected struct that the method will return
Now the reason we made this was that these fields were needed for every endpoint hence combining them into a single struct provided us reusability and flexibility.
+Now the testing suite used go's httptest
library where we use httptest.Server
as this setups a test server so that we can easily mock it. We also use http.ServerMux
to mock our endpoints response.
Lets say IntelOwl added a new endpoint called supercool in Tag
. Now you've implemented the endpoint as a method of TagService
and now you want to add its unit tests.
First go to tagService_test.go
in the tests
directory and add
func TestSuperCoolEndPoint(t *testing.T) {
+ testCases := make(map[string]TestData)
+ testCases["simple"] = TestData{
+ Input: nil,
+ Data: `{ "supercool": "you're a great developer :)"}`,
+ StatusCode: http.StatusOK,
+ Want: "you're a great developer :)",
+ }
+ for name, testCase := range testCases {
+ // subtest
+ t.Run(name, func(t *testing.T) {
+ // setup will give you the client, mux/router, closeServer
+ client, apiHandler, closeServer := setup()
+ defer closeServer()
+ ctx := context.Background()
+ // now you can use apiHandler to mock how the server will handle this endpoints request
+ // you can use mux/router's Handle method or HandleFunc
+        apiHandler.HandleFunc("/api/tag/supercool", func(w http.ResponseWriter, r *http.Request) {
+ // this is a helper test to check if it is the expected request sent by the client
+ testMethod(t, r, "GET")
+ w.Write([]byte(testCase.Data))
+ })
+        expectedResponse, err := client.TagService.SuperCool(ctx)
+ if err != nil {
+ testError(t, testCase, err)
+ } else {
+            testWantData(t, testCase.Want, expectedResponse)
+ }
+ })
+ }
+}
+
Great! Now you've added your own unit tests.
+
The data provided from the site https://greedybear.honeynet.org are licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
+
+ Your account @{{ username }} has been verified by us and you + can now + login + to and use {{ host_name }}. +
++ We regret to inform you that your account request (@{{ username }}) on {{ host_name }} has been declined. You can sign up again with a + business email address and not a personal one to increase your chances of + getting access. +
++ Note: If you believe you received this email in error, please contact us at + {{ default_email }}. +
++ As part of our commitment to keep GreedyBear and its users secure, we notify + you that someone just attempted to register with this email address. +
++ Please click the link below to reset your password on GreedyBear. +
+ +or, you may also copy and paste directly into your browser's URL bar.
+ +{{ reset_url }}+ +
+ Note: This URL is valid only for the next 24 hours. +
+ ++ If you did not request a password reset, you can safely ignore this email. +
++ Please click the link below to verify your email address. +
+ + + +or, you may also copy and paste directly into your browser's URL bar.
+ +{{ verification_url }}+ +
+ Note: This URL is valid only for the next 24 hours. +
+ +Built with @certego/certego-ui.
+src/components
.src/hooks
.src/stores
.public/ public static assets
+|- icons/ icons/favicon
+|- index.html/ root HTML file
+src/ source code
+|- components/ pages and components
+| |- auth/ `certego_saas.apps.auth` (login, logout pages)
+| |- dashboard/ dashboard page and charts
+| |- home/ landing/home page
+| |- Routes.jsx lazy route-component mappings
+|- constants/ constant values
+| |- api.js API URLs
+| |- environment.js environment variables
+| |- index.js GreedyBear specific constants
+|- hooks/ react hooks
+|- layouts/ header, main, footer containers
+|- stores/ zustand stores hooks
+|- styles/ scss files
+|- wrappers/ Higher-Order components
+|- App.jsx App component
+|- index.jsx Root JS file (ReactDOM renderer)
+
The frontend inside the docker containers does not hot-reload, so
+you need to use CRA dev server
on your host machine to serve pages when doing development on the frontend, using docker nginx only as API source.
http://localhost:80
node-js
installed, you have to do that. Follow the guide here. We tested this with NodeJS >=16.6http://localhost:3001
. It acts as proxy for API requests to original app web server.package.json
and enviroments.js
.Verifying...
+ + ) : ( +
+ Please note that GreedyBear is operated as an invite-only trust group.
+ Once you sign up, our team will reach out to you at the provided email
+ address.
+
+
+ We recommend signing up with a business email address and not a
+ personal one to increase your chances of getting access.
+
+
+ If you are an admin please check the{" "} + + documentation + {" "} + and correctly configure all the required variables. +
+{subText}
+ + Read + ++ Note: This is an irreversible operation. +
++ Once deleted, you cannot use this API key to access + GreedyBear's API. However, you will be able to generate a new + one. +
+ Are you sure you wish to proceed ? +The project goal is to extract data of the attacks detected by a TPOT or a cluster of them and to generate some feeds that can be used to prevent and detect attacks.
+ +Documentation about GreedyBear installation, usage, configuration and contribution can be found at this link
+There are public feeds provided by The Honeynet Project in this site. Example
+Please do not perform too many requests to extract feeds or you will be banned.
+If you want to be updated regularly, please download the feeds only once every 10 minutes (this is the time between each internal update).
+To check all the available feeds, Please refer to our usage guide
+GreedyBear provides an easy-to-query API to get the information available in GB regarding the queried observable (domain or IP address).
+To understand more, Please refer to our usage guide
+The tool has been created not only to provide the feeds from The Honeynet Project's cluster of TPOTs.
+If you manage one or more T-POTs of your own, you can get the code of this application and run Greedybear on your environment. +In this way, you are able to provide new feeds of your own.
+To install it locally, Please refer to our installation guide
+Certego is a MDR (Managed Detection and Response) and Threat Intelligence Provider based in Italy.
+Started as a personal Christmas project from Matteo Lodi, since then GreedyBear is being improved mainly thanks to the efforts of the Certego Threat Intelligence Team.
+The Honeynet Project is a non-profit organization working on creating open source cyber security tools and sharing knowledge about cyber threats.
++
+ Your account @{{ username }} has been verified by us and you + can now + login + to and use {{ host_name }}. +
++ We regret to inform you that your account request (@{{ username }}) on {{ host_name }} has been declined. You can sign up again with a + business email address and not a personal one to increase your chances of + getting access. +
++ Note: If you believe you received this email in error, please contact us at + {{ default_email }}. +
++ As part of our commitment to keep IntelOwl and its users secure, we notify + you that someone just attempted to register with this email address. +
++ Please click the link below to reset your password on IntelOwl. +
+ +or, you may also copy and paste directly into your browser's URL bar.
+ +{{ reset_url }}+ +
+ Note: This URL is valid only for the next 24 hours. +
+ ++ If you did not request a password reset, you can safely ignore this email. +
++ Please click the link below to verify your email address. +
+ + + +or, you may also copy and paste directly into your browser's URL bar.
+ +{{ verification_url }}+ +
+ Note: This URL is valid only for the next 24 hours. +
+ +We embedded the compiled version for Ubuntu for that can be retrieved from its original repo here.
+We decided to do not use the version shipped by default Ubuntu packages because it were too old (2.1)
+At the last time of writing we uploaded the version 2.6-dev
+Built with @certego/certego-ui.
+src/components/common
.src/hooks
.src/stores
.public/ public static assets
+|- icons/ icons/favicon
+|- index.html/ root HTML file
+src/ source code
+|- components/ pages and components
+| |- auth/ `authentication` (login, logout, OAuth pages)
+| |- common/ small re-usable components
+| |- dashboard/ dashboard page and charts
+| |- home/ landing/home page
+| |- jobs/ `api_app`
+| | |- result/ JobResult.jsx
+| | |- table/ JobsTable.jsx
+| |- me/
+| | |- organization/ `certego_saas.apps.organization`
+| | |- sessions/ durin (sessions management)
+| |- misc/
+| | |- notification/ `certego_saas.apps.notifications`
+| |- plugins/ `api_app.analyzers_manager`, `api_app.connectors_manager`
+| |- scan/ new scan/job
+| |- Routes.jsx lazy route-component mappings
+|- constants/ constant values
+| |- api.js API URLs
+| |- environment.js environment variables
+| |- index.js intelowl specific constants
+|- hooks/ react hooks
+|- layouts/ header, main, footer containers
+|- stores/ zustand stores hooks
+|- styles/ scss files
+|- utils/ utility functions
+|- wrappers/ Higher-Order components
+|- App.jsx App component
+|- index.jsx Root JS file (ReactDOM renderer)
+
The frontend inside the docker containers does not hot-reload, so
+you need to use CRA dev server
on your host machine to serve pages when doing development on the frontend, using docker nginx only as API source.
http://localhost:80
node-js
installed, you have to do that. Follow the guide here. We tested this with NodeJS >=16.6http://localhost:3000
. It acts as proxy for API requests to original app web server.package.json
.certego-ui
package so it can also hot-reload. This is useful when you want to make changes in certego-ui and rapidly test them with IntelOwl. Refer here for setup instructions.We have dependabot enabled for the React.js frontend application. The updates are scheduled for once a week.
++ Welcome to IntelOwls Guide for First Time Visitors! For further + questions you could either check out our{" "} + docs or reach + us out on{" "} + + the official IntelOwl slack channel + +
++ Plugins are the core modular components of IntelOwl that can be + easily added, changed and customized. The most important ones are + the Analyzers that allow to perform data extraction on the + observables and/or files that you would like to analyze. +
+Write up your own plugin configuration!
++ Note: Some plugins work out-of-the-box, while others requires to be + configured (with API keys for instance). +
++ You could get started with analyzing various observables with just + three steps{" "} +
+Select/Add Observables
+
+ Select a Playbook.
+
Playbooks are designed to be easy to share sequence of
+ running Plugins (Analyzers/, Connectors, ...) on a particular kind
+ of observable.
+
+ Jobs are simple analysis of an observable or a file. Here you could + see the list of all previous jobs and expand over the details + through clicking that particular job from the table +
++ Investigations are a framework to connect jobs with each other. Here + you could see the list of all previous investigations and expand + over the details through clicking that particular investigation from + the table +
+See previous job details here with charts and more
+Filter by time to get details about previous jobs
+Verifying...
+ + ) : ( +
+ Please note that IntelOwl is operated as an invite-only trust group.
+ Once you sign up, our team will reach out to you at the provided email
+ address.
+
+
+ We recommend signing up with a business email address and not a
+ personal one to increase your chances of getting access.
+
+
+ If you are an admin please check the{" "} + + documentation + {" "} + and correctly configure all the required variables. +
+{subText}
+ + Read + +#{id}
++ No visualizers available. You can consult the results in the raw + format.{" "} +
+ ), + }); + } + + setUIElements(newUIElements); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [job]); + + const elementsToShow = isSelectedUI ? UIElements : rawElements; + + return ( +{comment.content}
+ {user.username === comment.user.username && ( +Description is loading
+ + ) : ( + {markdownToHtml(plugin?.description)} + )} ++ No data to show in the UI. You can consult the results in the raw + format. +
+ ); + } + + return#{id}
++ You can choose to create a new organization or join an + existing one by asking an organization owner to send you an + invitation. +
++ You can choose to create a new organization. +
++ You can choose to create a new organization. +
+
+ User @{username} will be removed from your organization and
+ will no longer have access to the rules and{" "}
+ submissions
+ of your organization members.
+
+
+ Are you sure you wish to proceed?
+
+
+ You will be removed from the {orgName} organization and will no
+ longer have access to the rules and submissions
+ of the organization members.
+
+
+ Are you sure you wish to proceed?
+
+
+ Organization "{orgName}" will be deleted along with
+ every membership (user memberships, not user accounts) and invitations
+ too.
+
+
+ Are you sure you wish to proceed?
+
+
+ ( {pluginInfo?.python_module} )
+
+ )}
+ {pluginInfo?.maximum_tlp && (
+ {JSON.stringify(value, null, 2)}
+ {JSON.stringify(value.value, null, 2)}
+
+ ({value.type})
+ {value.env_var_key}
)
+
+ {value.required && (
+ + {!pluginInfo?.verification.configured && + pluginInfo?.verification.details} +
+
+ {JSON.stringify(
+ pluginInfo?.runtime_configuration[plugin][pluginName],
+ null,
+ 2,
+ )}
+
+ + Note: This is an irreversible operation. +
++ Once deleted, you cannot use this API key to access IntelOwl's + API. However, you will be able to generate a new one. +
+ Are you sure you wish to proceed ? +Do you want to get threat intelligence data about a malware, an IP address or a domain? Do you want to get this kind of data from multiple sources at the same time using a single API request?
+You are in the right place!
+IntelOwl is an Open Source solution for management of Threat Intelligence at scale. It integrates a number of analyzers available online and a lot of cutting-edge malware analysis tools.
+This application is built to scale out and to speed up the retrieval of threat info.
+It provides: +- Enrichment of Threat Intel for files as well as observables (IP, Domain, URL, hash, etc). +- A Fully-fledged REST APIs written in Django and Python. +- An easy way to be integrated in your stack of security tools to automate common jobs usually performed, for instance, by SOC analysts manually. (Thanks to the official libraries pyintelowl and go-intelowl) +- A built-in GUI: provides features such as dashboard, visualizations of analysis data, easy to use forms for requesting new analysis, etc. +- A framework composed of modular components called Plugins: + - analyzers that can be run to either retrieve data from external sources (like VirusTotal or AbuseIPDB) or to generate intel from internally available tools (like Yara or Oletools) + - connectors that can be run to export data to external platforms (like MISP or OpenCTI) + - pivots that are designed to trigger the execution of a chain of analysis and connect them to each other + - visualizers that are designed to create custom visualizations of analyzers results + - ingestors that allows to automatically ingest stream of observables or files to IntelOwl itself + - playbooks that are meant to make analysis easily repeatable
+We try hard to keep our documentation well written, easy to understand and always updated. +All info about installation, usage, configuration and contribution can be found here
+To know more about the project and its growth over time, you may be interested in reading the official blog posts and/or videos about the project by clicking on this link
+You can see the full list of all available analyzers in the documentation.
+Type | +Analyzers Available | +
---|---|
Inbuilt modules | +- Static Office Document, RTF, PDF, PE File Analysis and metadata extraction - Strings Deobfuscation and analysis (FLOSS, Stringsifter, ...) - PE Emulation with Qiling and Speakeasy - PE Signature verification - PE Capabilities Extraction (CAPA) - Javascript Emulation (Box-js) - Android Malware Analysis (Quark-Engine, ...) - SPF and DMARC Validator - Yara (a lot of public rules are available. You can also add your own rules) - more... |
+
External services | +- Abuse.ch MalwareBazaar/URLhaus/Threatfox/YARAify - GreyNoise v2 - Intezer - VirusTotal v3 - Crowdsec - URLscan - Shodan - AlienVault OTX - Intelligence_X - MISP - many more.. |
+
As open source project maintainers, we strongly rely on external support to get the resources and time to work on keeping the project alive, with a constant release of new features, bug fixes and general improvements.
+Because of this, we joined Open Collective to obtain non-profit equal level status which allows the organization to receive and manage donations transparently. Please support IntelOwl and all the community by choosing a plan (BRONZE, SILVER, etc).
+ +Certego is a MDR (Managed Detection and Response) and Threat Intelligence Provider based in Italy.
+IntelOwl was born out of Certego's Threat intelligence R&D division and is constantly maintained and updated thanks to them.
+The Honeynet Project is a non-profit organization working on creating open source cyber security tools and sharing knowledge about cyber threats.
+Thanks to Honeynet, we are hosting a public demo of the application here. If you are interested, please contact a member of Honeynet to get access to the public service.
+Since its birth this project has been participating in the Google Summer of Code (GSoC)!
+If you are interested in participating in the next Google Summer of Code, check all the info available in the dedicated repository!
+ThreatHunter.ai® is a 100% Service-Disabled Veteran-Owned Small Business, started in 2007 under the name Milton Security Group. ThreatHunter.ai is the global leader in Dynamic Threat Hunting. Operating a true 24x7x365 Security Operations Center with AI/ML-enhanced human Threat Hunters, ThreatHunter.ai has changed how the industry finds and mitigates threats in real time. For over 15 years, our teams of Threat Hunters have stopped hundreds of thousands of threats and assisted organizations in defending against threat actors around the clock.
+In 2021 IntelOwl joined the official Docker Open Source Program. This allows IntelOwl developers to easily manage Docker images and focus on writing the code. You may find the official IntelOwl Docker images here.
+In 2022 IntelOwl joined the official DigitalOcean Open Source Program.
+Feel free to contact the main developers at any time on Twitter:
+IntelOwl's maintainers are available to offer paid consultancy and mentorship.
+In the IntelOwl documentation site, we use Git submodules to manage multiple repositories as child repositories. This allows us to fetch updated code (including docstrings and API specs) automatically, reducing redundant work for developers.
+There are four submodules under the IntelOwlProject:
+These submodules are updated whenever we push new changes to our documentation site; here's the GitHub Action file.
+When you make changes to the IntelOwl codebase, they typically do not propagate automatically to the documentation site's GitHub repository.
+During development, if you want to update the submodules to the latest changes, you can run the following:
+# Fetch the latest commits for every submodule
+git submodule foreach --recursive 'git fetch --all'
+# Initialize any missing submodules and shallow-update them from their remotes
+git submodule update --init --remote --recursive --depth 1
+# Re-sync submodule URLs with the ones declared in .gitmodules
+git submodule sync --recursive
+# Move every submodule to the latest commit on its tracked remote branch
+git submodule update --remote --recursive
+
However, if you need to test changes immediately, you can do the following:
+Point the submodule in `.gitmodules` to your fork of the repository to check the updates instantly. After modifying `.gitmodules`, run the following command to fetch the latest changes:
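+The exact command is not shown on this page; a reasonable reconstruction, assuming the fork is registered like any other submodule, is a sync followed by an update. The submodule name/path and fork URL below are hypothetical placeholders, not values taken from this repository:
+
```bash
# Hypothetical: point the "docs/Submodules/IntelOwl" submodule at your fork
# (submodule path and URL are placeholders)
git config -f .gitmodules submodule.docs/Submodules/IntelOwl.url \
    https://github.com/<your-username>/IntelOwl.git

# Propagate the new URL to your local git config, then pull the fork's content
git submodule sync --recursive
git submodule update --init --remote --recursive
```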
This ensures that your documentation reflects the most recent code changes.
+Robust Python SDK and Command Line Client for interacting with IntelOwl's API.
+For development/testing, run `pip3 install pyintelowl[dev]`.
+On successful installation, the `pyintelowl` entry script should be directly invokable. For example:
+$ pyintelowl
+Usage: pyintelowl [OPTIONS] COMMAND [ARGS]...
+
+Options:
+  -d, --debug  Set log level to DEBUG
+  --version    Show the version and exit.
+  -h, --help   Show this message and exit.
+
+Commands:
+  analyse                Send new analysis request
+  analyzer-healthcheck   Send healthcheck request for an analyzer...
+  config                 Set or view config variables
+  connector-healthcheck  Send healthcheck request for a connector
+  jobs                   Manage Jobs
+  tags                   Manage tags
+
+from pyintelowl import IntelOwl
+
+# API key and instance URL are required; the PEM file path and proxies are optional
+obj = IntelOwl(
+    "<your_api_key>",
+    "<your_intelowl_instance_url>",
+    "optional<path_to_pem_file>",
+    "optional<proxies>",
+)
+
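+Once the client is instantiated, the same object drives the whole API. The sketch below submits an observable and then fetches the resulting job; the method names follow pyintelowl's documented API, but treat the exact signatures as something to verify against the docs linked below:
+
```python
from pyintelowl import IntelOwl, IntelOwlClientException

client = IntelOwl("<your_api_key>", "<your_intelowl_instance_url>")

try:
    # Submit an IP observable for analysis
    result = client.send_observable_analysis_request(observable_name="8.8.8.8")
    job_id = result["job_id"]
    # Retrieve the job to inspect its status and analyzer reports
    job = client.get_job_by_id(job_id)
    print(job["status"])
except IntelOwlClientException as e:
    print(e)
```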
For more comprehensive documentation, please see https://intelowlproject.github.io/docs/pyintelowl/.
+View CHANGELOG.md.
+You need a valid API key to interact with the IntelOwl server.
+Keys can be created from the "API access" section of the user's menu in IntelOwl's GUI.
+Otherwise, you can create them from the Django Admin Interface of the IntelOwl application with an administrator account, under the "Durin" section ("Auth tokens").
+