[Truncated leading hunk: a tab list re-renders its "Alerts", "Timeline", and "Topology" tabs (JSX markup lost in extraction); "Coming Soon..." remains as the placeholder panel content.]
diff --git a/keep-ui/app/incidents/create-or-update-incident.tsx b/keep-ui/app/incidents/create-or-update-incident.tsx
index 0d8c23972..fcfae9934 100644
--- a/keep-ui/app/incidents/create-or-update-incident.tsx
+++ b/keep-ui/app/incidents/create-or-update-incident.tsx
@@ -12,7 +12,7 @@ import { useSession } from "next-auth/react";
import { FormEvent, useEffect, useState } from "react";
import { toast } from "react-toastify";
import { getApiURL } from "utils/apiUrl";
-import { IncidentDto } from "./model";
+import { IncidentDto } from "./models";
import { useIncidents } from "utils/hooks/useIncidents";
interface Props {
diff --git a/keep-ui/app/incidents/incident-candidate-actions.tsx b/keep-ui/app/incidents/incident-candidate-actions.tsx
index e6170dd09..1a8a33e8f 100644
--- a/keep-ui/app/incidents/incident-candidate-actions.tsx
+++ b/keep-ui/app/incidents/incident-candidate-actions.tsx
@@ -1,6 +1,6 @@
import {getApiURL} from "../../utils/apiUrl";
import {toast} from "react-toastify";
-import {IncidentDto, PaginatedIncidentsDto} from "./model";
+import {IncidentDto, PaginatedIncidentsDto} from "./models";
import {Session} from "next-auth";
interface Props {
diff --git a/keep-ui/app/incidents/incident-change-status-modal.tsx b/keep-ui/app/incidents/incident-change-status-modal.tsx
new file mode 100644
index 000000000..886d205b8
--- /dev/null
+++ b/keep-ui/app/incidents/incident-change-status-modal.tsx
@@ -0,0 +1,153 @@
+import { Button, Title, Subtitle } from "@tremor/react";
+import Modal from "@/components/ui/Modal";
+import Select, {
+ CSSObjectWithLabel,
+ ControlProps,
+ OptionProps,
+ GroupBase,
+} from "react-select";
+import { useState } from "react";
+import { IncidentDto, Status } from "./models";
+import { getApiURL } from "utils/apiUrl";
+import { useSession } from "next-auth/react";
+import { toast } from "react-toastify";
+import {
+ CheckCircleIcon,
+ ExclamationCircleIcon,
+ PauseIcon,
+} from "@heroicons/react/24/outline";
+
+const statusIcons = {
+  [Status.Firing]: <ExclamationCircleIcon className="w-4 h-4 mr-2" />,
+  [Status.Resolved]: <CheckCircleIcon className="w-4 h-4 mr-2" />,
+  [Status.Acknowledged]: <PauseIcon className="w-4 h-4 mr-2" />,
+};
+
+const customSelectStyles = {
+ control: (
+ base: CSSObjectWithLabel,
+ state: ControlProps<
+ { value: Status; label: JSX.Element },
+ false,
+ GroupBase<{ value: Status; label: JSX.Element }>
+ >
+ ) => ({
+ ...base,
+ borderColor: state.isFocused ? "orange" : base.borderColor,
+ boxShadow: state.isFocused ? "0 0 0 1px orange" : base.boxShadow,
+ "&:hover": {
+ borderColor: "orange",
+ },
+ }),
+ option: (
+ base: CSSObjectWithLabel,
+ {
+ isFocused,
+ }: OptionProps<
+ { value: Status; label: JSX.Element },
+ false,
+ GroupBase<{ value: Status; label: JSX.Element }>
+ >
+ ) => ({
+ ...base,
+ backgroundColor: isFocused ? "rgba(255,165,0,0.1)" : base.backgroundColor,
+ "&:hover": {
+ backgroundColor: "rgba(255,165,0,0.2)",
+ },
+ }),
+};
+
+interface Props {
+ incident: IncidentDto | null | undefined;
+ mutate: () => void;
+ handleClose: () => void;
+}
+
+export default function IncidentChangeStatusModal({
+ incident,
+ mutate,
+ handleClose,
+}: Props) {
+ const { data: session } = useSession();
+  const [selectedStatus, setSelectedStatus] = useState<Status | null>(null);
+ const [comment, setComment] = useState("");
+
+ if (!incident) return null;
+
+ const statusOptions = Object.values(Status)
+ .filter((status) => status !== incident.status) // Exclude current status
+ .map((status) => ({
+ value: status,
+    label: (
+      <div className="flex items-center">
+        {statusIcons[status]}
+        {status.charAt(0).toUpperCase() + status.slice(1)}
+      </div>
+    ),
+ }));
+
+ const clearAndClose = () => {
+ setSelectedStatus(null);
+ handleClose();
+ };
+
+ const handleChangeStatus = async () => {
+ if (!selectedStatus) {
+ toast.error("Please select a new status.");
+ return;
+ }
+
+ try {
+ const response = await fetch(`${getApiURL()}/incidents/${incident.id}/status`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${session?.accessToken}`,
+ },
+ body: JSON.stringify({
+ status: selectedStatus,
+ comment: comment,
+ }),
+ });
+
+ if (response.ok) {
+ toast.success("Incident status changed successfully!");
+ clearAndClose();
+ await mutate();
+ } else {
+ toast.error("Failed to change incident status.");
+ }
+ } catch (error) {
+ toast.error("An error occurred while changing incident status.");
+ }
+ };
+
+  return (
+    <Modal onClose={clearAndClose} isOpen={!!incident}>
+      <Title>Change Incident Status</Title>
+      <Subtitle className="flex items-center">
+        Change status from <strong className="mx-2">{incident.status}</strong> to:
+      </Subtitle>
+      <div className="flex flex-col gap-5 p-5">
+        <Select
+          options={statusOptions}
+          value={statusOptions.find((option) => option.value === selectedStatus)}
+          onChange={(option) => setSelectedStatus(option?.value ?? null)}
+          placeholder="Select new status"
+          styles={customSelectStyles}
+        />
+        <textarea
+          className="w-full border rounded p-2"
+          placeholder="Add a comment (optional)"
+          value={comment}
+          onChange={(e) => setComment(e.target.value)}
+        />
+        <div className="flex justify-end gap-2.5">
+          <Button color="orange" variant="secondary" onClick={clearAndClose}>
+            Cancel
+          </Button>
+          <Button color="orange" onClick={handleChangeStatus}>
+            Change Status
+          </Button>
+        </div>
+      </div>
+    </Modal>
+  );
+}
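Reviewer note: the modal drives the new backend status endpoint. A minimal standalone sketch of the request contract, assuming only what the diff shows (`POST /incidents/{id}/status` with a JSON body of `status` and `comment`); `apiUrl`, `token`, and `incidentId` are placeholders:

```typescript
// Sketch of the status-change call, mirroring handleChangeStatus above.
type IncidentStatus = "firing" | "resolved" | "acknowledged";

async function changeIncidentStatus(
  apiUrl: string, // placeholder for getApiURL()
  token: string, // placeholder for session?.accessToken
  incidentId: string,
  status: IncidentStatus,
  comment: string
): Promise<boolean> {
  const response = await fetch(`${apiUrl}/incidents/${incidentId}/status`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({ status, comment }),
  });
  return response.ok; // the modal toasts success or failure based on this
}
```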
diff --git a/keep-ui/app/incidents/incident-pagination.tsx b/keep-ui/app/incidents/incident-pagination.tsx
index a404dd412..0aabc3354 100644
--- a/keep-ui/app/incidents/incident-pagination.tsx
+++ b/keep-ui/app/incidents/incident-pagination.tsx
@@ -10,7 +10,7 @@ import { Button, Text } from "@tremor/react";
import { StylesConfig, SingleValueProps, components, GroupBase } from 'react-select';
import Select from 'react-select';
import { Table } from "@tanstack/react-table";
-import {IncidentDto} from "./model";
+import {IncidentDto} from "./models";
import {AlertDto} from "../alerts/models";
interface Props {
diff --git a/keep-ui/app/incidents/incident-table-component.tsx b/keep-ui/app/incidents/incident-table-component.tsx
index 0116aa5a6..3bae86c36 100644
--- a/keep-ui/app/incidents/incident-table-component.tsx
+++ b/keep-ui/app/incidents/incident-table-component.tsx
@@ -1,7 +1,7 @@
import {Icon, Table, TableBody, TableCell, TableHead, TableHeaderCell, TableRow} from "@tremor/react";
import {flexRender, Header, Table as ReactTable} from "@tanstack/react-table";
import React, {ReactNode} from "react";
-import { IncidentDto } from "./model";
+import { IncidentDto } from "./models";
import { useRouter } from "next/navigation";
import {FaArrowDown, FaArrowRight, FaArrowUp} from "react-icons/fa";
interface Props {
diff --git a/keep-ui/app/incidents/incident.tsx b/keep-ui/app/incidents/incident.tsx
index a661860b5..4492799ec 100644
--- a/keep-ui/app/incidents/incident.tsx
+++ b/keep-ui/app/incidents/incident.tsx
@@ -2,7 +2,7 @@
import { Card, Title, Subtitle, Button, Badge } from "@tremor/react";
import Loading from "app/loading";
import { useState } from "react";
-import { IncidentDto } from "./model";
+import { IncidentDto } from "./models";
import CreateOrUpdateIncident from "./create-or-update-incident";
import IncidentsTable from "./incidents-table";
import { useIncidents, usePollIncidents } from "utils/hooks/useIncidents";
diff --git a/keep-ui/app/incidents/incidents-table.tsx b/keep-ui/app/incidents/incidents-table.tsx
index b885f529b..fb9b6a609 100644
--- a/keep-ui/app/incidents/incidents-table.tsx
+++ b/keep-ui/app/incidents/incidents-table.tsx
@@ -1,6 +1,7 @@
import {
Button,
Badge,
+ Icon,
} from "@tremor/react";
import {
ExpandedState,
@@ -11,14 +12,20 @@ import {
getSortedRowModel,
ColumnDef,
} from "@tanstack/react-table";
-import { MdRemoveCircle, MdModeEdit } from "react-icons/md";
+import {MdRemoveCircle, MdModeEdit, MdKeyboardDoubleArrowRight} from "react-icons/md";
import { useSession } from "next-auth/react";
-import {IncidentDto, PaginatedIncidentsDto} from "./model";
+import {IncidentDto, PaginatedIncidentsDto, Status} from "./models";
import React, {Dispatch, SetStateAction, useEffect, useState} from "react";
import Image from "next/image";
import IncidentPagination from "./incident-pagination";
import IncidentTableComponent from "./incident-table-component";
import {deleteIncident} from "./incident-candidate-actions";
+import {
+ CheckCircleIcon,
+ ExclamationCircleIcon,
+ PauseIcon,
+} from "@heroicons/react/24/outline";
+import IncidentChangeStatusModal from "./incident-change-status-modal";
const columnHelper = createColumnHelper<IncidentDto>();
@@ -31,6 +38,27 @@ interface Props {
editCallback: (rule: IncidentDto) => void;
}
+const STATUS_ICONS = {
+  [Status.Firing]: (
+    <Icon icon={ExclamationCircleIcon} tooltip={Status.Firing} color="red" size="sm" />
+  ),
+  [Status.Resolved]: (
+    <Icon icon={CheckCircleIcon} tooltip={Status.Resolved} color="green" size="sm" />
+  ),
+  [Status.Acknowledged]: (
+    <Icon icon={PauseIcon} tooltip={Status.Acknowledged} color="gray" size="sm" />
+  ),
+};
+
export default function IncidentsTable({
incidents: incidents,
mutate,
@@ -45,6 +73,13 @@ export default function IncidentsTable({
pageIndex: Math.ceil(incidents.offset / incidents.limit),
pageSize: incidents.limit,
});
+  const [changeStatusIncident, setChangeStatusIncident] = useState<IncidentDto | null>();
+
+ const handleChangeStatus = (e: React.MouseEvent, incident: IncidentDto) => {
+ e.preventDefault();
+ e.stopPropagation();
+ setChangeStatusIncident(incident);
+ }
useEffect(() => {
if (incidents.limit != pagination.pageSize) {
@@ -63,6 +98,11 @@ export default function IncidentsTable({
}, [pagination])
const columns = [
+ columnHelper.display({
+ id: "status",
+ header: "Status",
+      cell: ({ row }) => <div onClick={(e) => handleChangeStatus(e, row.original!)}>{STATUS_ICONS[row.original.status]}</div>,
+ }),
columnHelper.display({
id: "name",
header: "Name",
@@ -101,8 +141,8 @@ export default function IncidentsTable({
columnHelper.display({
id: "alert_sources",
header: "Alert Sources",
- cell: (context) =>
- context.row.original.alert_sources.map((alert_sources, index) => (
+ cell: ({ row }) =>
+ row.original.alert_sources.map((alert_sources, index) => (
-      cell: (context) => context.row.original.assignee
+      cell: ({ row }) => row.original.assignee
}),
columnHelper.accessor("creation_time", {
id: "creation_time",
@@ -137,29 +177,39 @@ export default function IncidentsTable({
columnHelper.display({
id: "delete",
header: "",
- cell: (context) => (
+ cell: ({ row }) => (
{/*If user wants to edit the mapping. We use the callback to set the data in mapping.tsx which is then passed to the create-new-mapping.tsx form*/}
@@ -191,6 +241,11 @@ export default function IncidentsTable({
return (
+      <IncidentChangeStatusModal
+        incident={changeStatusIncident}
+        mutate={mutate}
+        handleClose={() => setChangeStatusIncident(null)}
+      />
diff --git a/keep-ui/app/incidents/model.ts b/keep-ui/app/incidents/models.ts
similarity index 85%
rename from keep-ui/app/incidents/model.ts
rename to keep-ui/app/incidents/models.ts
index 8ca02a855..8f9ff2960 100644
--- a/keep-ui/app/incidents/model.ts
+++ b/keep-ui/app/incidents/models.ts
@@ -1,5 +1,12 @@
import {AlertDto} from "../alerts/models";
+export enum Status {
+ Firing = "firing",
+ Resolved = "resolved",
+ Acknowledged = "acknowledged",
+}
+
+
export interface IncidentDto {
id: string;
user_generated_name: string;
@@ -8,6 +15,7 @@ export interface IncidentDto {
generated_summary: string;
assignee: string;
severity: string;
+ status: Status;
alerts_count: number;
alert_sources: string[];
services: string[];
@@ -32,4 +40,3 @@ export interface PaginatedIncidentAlertsDto {
count: number;
items: AlertDto[];
}
-
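Reviewer note: with `status` now part of `IncidentDto`, consumers can switch over the enum exhaustively. A hedged sketch (the color mapping is illustrative, not from this PR):

```typescript
import { Status } from "./models";

// Compile-time exhaustive mapping over the new enum; adding a fourth
// Status member would make the `never` assignment below fail to typecheck.
function statusColor(status: Status): "red" | "green" | "gray" {
  switch (status) {
    case Status.Firing:
      return "red";
    case Status.Resolved:
      return "green";
    case Status.Acknowledged:
      return "gray";
    default: {
      const exhaustive: never = status;
      return exhaustive;
    }
  }
}
```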
diff --git a/keep-ui/app/incidents/predicted-incidents-table.tsx b/keep-ui/app/incidents/predicted-incidents-table.tsx
index 0fb7b412c..e674715fa 100644
--- a/keep-ui/app/incidents/predicted-incidents-table.tsx
+++ b/keep-ui/app/incidents/predicted-incidents-table.tsx
@@ -11,7 +11,7 @@ import {
} from "@tanstack/react-table";
import { MdDone, MdBlock} from "react-icons/md";
import { useSession } from "next-auth/react";
-import {IncidentDto, PaginatedIncidentsDto} from "./model";
+import {IncidentDto, PaginatedIncidentsDto} from "./models";
import React, { useState } from "react";
import Image from "next/image";
import { IncidentTableComponent } from "./incident-table-component";
diff --git a/keep-ui/app/workflows/[workflow_id]/workflow-execution-table.tsx b/keep-ui/app/workflows/[workflow_id]/workflow-execution-table.tsx
index 6ca7c1748..1a66001ee 100644
--- a/keep-ui/app/workflows/[workflow_id]/workflow-execution-table.tsx
+++ b/keep-ui/app/workflows/[workflow_id]/workflow-execution-table.tsx
@@ -113,6 +113,7 @@ function getTriggerIcon(triggered_by: string) {
case "Manual": return FaHandPointer;
case "Scheduler": return PiDiamondsFourFill;
case "Alert": return HiBellAlert;
+ case "Incident": return HiBellAlert;
default: return PiDiamondsFourFill;
}
}
@@ -159,6 +160,9 @@ export function ExecutionTable({
case triggered_by.substring(0, 6) === "manual":
valueToShow = "Manual";
break;
+ case triggered_by.substring(0, 8) === "incident":
+ valueToShow = "Incident";
+ break;
}
}
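Reviewer note: the execution table resolves the display label from a string prefix of `triggered_by`; the payload after the prefix (e.g. an incident id) is not shown in this diff and is assumed. A condensed sketch of the convention as extended here:

```typescript
// Equivalent of the new switch cases above, isolated as a helper.
function triggerLabel(triggered_by: string): string {
  if (triggered_by.substring(0, 6) === "manual") return "Manual";
  if (triggered_by.substring(0, 8) === "incident") return "Incident";
  return triggered_by; // other prefixes are handled elsewhere in the component
}
```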
diff --git a/keep-ui/app/workflows/builder/CustomNode.tsx b/keep-ui/app/workflows/builder/CustomNode.tsx
index 8d7e43ea9..c98d868fa 100644
--- a/keep-ui/app/workflows/builder/CustomNode.tsx
+++ b/keep-ui/app/workflows/builder/CustomNode.tsx
@@ -16,6 +16,7 @@ import { toast } from "react-toastify";
function IconUrlProvider(data: FlowNode["data"]) {
const { componentType, type } = data || {};
if (type === "alert" || type === "workflow" || type === "trigger" || !type) return "/keep.png";
+ if (type === "incident" || type === "workflow" || type === "trigger" || !type) return "/keep.png";
return `/icons/${type
?.replace("step-", "")
?.replace("action-", "")
diff --git a/keep-ui/app/workflows/builder/ReactFlowEditor.tsx b/keep-ui/app/workflows/builder/ReactFlowEditor.tsx
index 6848dd13c..3cbea77ea 100644
--- a/keep-ui/app/workflows/builder/ReactFlowEditor.tsx
+++ b/keep-ui/app/workflows/builder/ReactFlowEditor.tsx
@@ -21,15 +21,23 @@ const ReactFlowEditor = ({
};
onDefinitionChange: (def: Definition) => void
}) => {
- const { selectedNode, changes, v2Properties, nodes, edges, setOpneGlobalEditor, synced, setSynced } = useStore();
+ const { selectedNode, changes, v2Properties, nodes, edges, setOpneGlobalEditor, synced, setSynced, setCanDeploy } = useStore();
const [isOpen, setIsOpen] = useState(false);
const stepEditorRef = useRef(null);
const containerRef = useRef(null);
- const isTrigger = ['interval', 'manual', 'alert'].includes(selectedNode || '')
+ const isTrigger = ['interval', 'manual', 'alert', 'incident'].includes(selectedNode || '')
+ const saveRef = useRef(false);
+ useEffect(()=>{
+ if(saveRef.current && synced){
+ setCanDeploy(true);
+ saveRef.current = false;
+ }
+ }, [saveRef?.current, synced])
useEffect(() => {
setIsOpen(true);
if (selectedNode) {
+ saveRef.current = false;
const timer = setTimeout(() => {
if (isTrigger) {
setOpneGlobalEditor(true);
@@ -114,9 +122,16 @@ const ReactFlowEditor = ({
-      <GlobalEditorV2 synced={synced} />
+      <GlobalEditorV2 synced={synced} saveRef={saveRef} />
       {!selectedNode?.includes('empty') && !isTrigger && <Divider />}
-      {!selectedNode?.includes('empty') && !isTrigger && <StepEditorV2 providers={providers} installedProviders={installedProviders} setSynced={setSynced} />}
+      {!selectedNode?.includes('empty') && !isTrigger && <StepEditorV2 providers={providers} installedProviders={installedProviders} setSynced={setSynced} saveRef={saveRef} />}
diff --git a/keep-ui/app/workflows/builder/ToolBox.tsx b/keep-ui/app/workflows/builder/ToolBox.tsx
index 2f77cb8b3..eb9993bc5 100644
--- a/keep-ui/app/workflows/builder/ToolBox.tsx
+++ b/keep-ui/app/workflows/builder/ToolBox.tsx
@@ -34,6 +34,7 @@ const GroupedMenu = ({ name, steps, searchTerm, isDraggable = true }: {
function IconUrlProvider(data: any) {
const { type } = data || {};
if (type === "alert" || type === "workflow") return "/keep.png";
+ if (type === "incident" || type === "workflow") return "/keep.png";
return `/icons/${type
?.replace("step-", "")
?.replace("action-", "")
@@ -126,7 +127,7 @@ const DragAndDropSidebar = ({ isDraggable }: {
setIsVisible(!isDraggable)
}, [selectedNode, selectedEdge, isDraggable]);
- const triggerNodeMap = nodes.filter((node: any) => ['interval', 'manual', 'alert'].includes(node?.id)).reduce((obj: any, node: any) => {
+ const triggerNodeMap = nodes.filter((node: any) => ['interval', 'manual', 'alert', 'incident'].includes(node?.id)).reduce((obj: any, node: any) => {
obj[node.id] = true;
return obj;
  }, {} as Record<string, boolean>);
diff --git a/keep-ui/app/workflows/builder/builder-store.tsx b/keep-ui/app/workflows/builder/builder-store.tsx
index e19b7d6ea..202c32c19 100644
--- a/keep-ui/app/workflows/builder/builder-store.tsx
+++ b/keep-ui/app/workflows/builder/builder-store.tsx
@@ -149,6 +149,8 @@ export type FlowState = {
errorNode: string | null;
synced: boolean;
setSynced: (synced: boolean) => void;
+ canDeploy: boolean;
+ setCanDeploy: (deploy: boolean) => void;
};
@@ -242,6 +244,10 @@ function addNodeBetween(nodeOrEdge: string | null, step: V2Step, type: string, s
set({v2Properties: {...get().v2Properties, [newNodeId]: {}}});
break;
}
+ case "incident": {
+ set({v2Properties: {...get().v2Properties, [newNodeId]: {}}});
+ break;
+ }
}
}
const useStore = create<FlowState>((set, get) => ({
firstInitilisationDone: false,
errorNode: null,
synced: true,
+ canDeploy: false,
+ setCanDeploy: (deploy)=>set({canDeploy: deploy}),
setSynced: (sync) => set({ synced: sync }),
setErrorNode: (id) => set({ errorNode: id }),
setFirstInitilisationDone: (firstInitilisationDone) => set({ firstInitilisationDone }),
@@ -291,14 +299,14 @@ const useStore = create((set, get) => ({
});
set({
nodes: updatedNodes,
- changes: get().changes + 1
+ changes: get().changes + 1,
});
}
},
- setV2Properties: (properties) => set({ v2Properties: properties }),
+ setV2Properties: (properties) => set({ v2Properties: properties, canDeploy:false }),
updateV2Properties: (properties) => {
const updatedProperties = { ...get().v2Properties, ...properties };
- set({ v2Properties: updatedProperties, changes: get().changes + 1 });
+ set({ v2Properties: updatedProperties, changes: get().changes + 1, canDeploy:false });
},
setSelectedNode: (id) => {
set({
@@ -433,7 +441,7 @@ const useStore = create((set, get) => ({
finalEdges = edges.filter((edge) => !(idArray.includes(edge.source) || idArray.includes(edge.target)));
- if (['interval', 'alert', 'manual'].includes(ids) && edges.some((edge) => edge.source === 'trigger_start' && edge.target !== ids)) {
+ if (['interval', 'alert', 'manual', 'incident'].includes(ids) && edges.some((edge) => edge.source === 'trigger_start' && edge.target !== ids)) {
edges = edges.filter((edge) => !(idArray.includes(edge.source)));
}
const sources = [...new Set(edges.filter((edge) => startNode.id === edge.target))];
@@ -453,7 +461,7 @@ const useStore = create((set, get) => ({
const newNode = createDefaultNodeV2({ ...nodes[endIndex + 1].data, islayouted: false }, nodes[endIndex + 1].id);
const newNodes = [...nodes.slice(0, nodeStartIndex), newNode, ...nodes.slice(endIndex + 2)];
- if(['manual', 'alert', 'interval'].includes(ids)) {
+ if(['manual', 'alert', 'interval', 'incident'].includes(ids)) {
const v2Properties = get().v2Properties;
delete v2Properties[ids];
set({ v2Properties });
diff --git a/keep-ui/app/workflows/builder/builder-validators.tsx b/keep-ui/app/workflows/builder/builder-validators.tsx
index b9742a2ed..fdbcebab9 100644
--- a/keep-ui/app/workflows/builder/builder-validators.tsx
+++ b/keep-ui/app/workflows/builder/builder-validators.tsx
@@ -20,9 +20,10 @@ export function globalValidatorV2(
!!definition?.properties &&
!definition.properties['manual'] &&
!definition.properties['interval'] &&
- !definition.properties['alert']
+ !definition.properties['alert'] &&
+ !definition.properties['incident']
) {
- setGlobalValidationError('trigger_start', "Workflow Should alteast have one trigger.");
+    setGlobalValidationError('trigger_start', "Workflow should have at least one trigger.");
return false;
}
@@ -38,6 +39,12 @@ export function globalValidatorV2(
return false;
}
+  const incidentActions = Object.values(definition.properties.incident || {}).filter(Boolean);
+  if (definition?.properties && definition.properties['incident'] && incidentActions.length === 0) {
+    setGlobalValidationError('incident', "Workflow incident trigger cannot be empty.");
+    return false;
+  }
+
const anyStepOrAction = definition?.sequence?.length > 0;
if (!anyStepOrAction) {
setGlobalValidationError(null,
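Reviewer note: the new incident-trigger check only passes when the trigger's properties are non-empty. Minimal shapes that exercise it (illustrative literals, not the full `Definition` type):

```typescript
// Passes: Object.values({ events: [...] }).filter(Boolean) is non-empty.
const valid = { properties: { incident: { events: ["created"] } } };

// Fails with "Workflow incident trigger cannot be empty.":
// Object.values({}).filter(Boolean) has length 0.
const invalid = { properties: { incident: {} } };
```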
diff --git a/keep-ui/app/workflows/builder/builder.tsx b/keep-ui/app/workflows/builder/builder.tsx
index 020d13bf4..08d60a4c5 100644
--- a/keep-ui/app/workflows/builder/builder.tsx
+++ b/keep-ui/app/workflows/builder/builder.tsx
@@ -27,6 +27,7 @@ import { WorkflowExecution, WorkflowExecutionFailure } from "./types";
import ReactFlowBuilder from "./ReactFlowBuilder";
import { ReactFlowProvider } from "@xyflow/react";
import useStore, { ReactFlowDefinition, V2Step, Definition as FlowDefinition } from "./builder-store";
+import { toast } from "react-toastify";
interface Props {
loadedAlertFile: string | null;
@@ -76,7 +77,7 @@ function Builder({
const [compiledAlert, setCompiledAlert] = useState(null);
const searchParams = useSearchParams();
- const { setErrorNode } = useStore();
+ const { errorNode, setErrorNode, canDeploy, synced } = useStore();
const setStepValidationErrorV2 = (step: V2Step, error: string | null) => {
setStepValidationError(error);
@@ -210,7 +211,12 @@ function Builder({
}, [triggerRun]);
useEffect(() => {
+
if (triggerSave) {
+ if(!synced) {
+ toast('Please save the previous step or wait while properties sync with the workflow.');
+ return;
+ }
if (workflowId) {
updateWorkflow();
} else {
@@ -220,6 +226,20 @@ function Builder({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [triggerSave]);
+ useEffect(()=>{
+ if (canDeploy && !errorNode && definition.isValid) {
+ if(!synced) {
+ toast('Please save the previous step or wait while properties sync with the workflow.');
+ return;
+ }
+ if (workflowId) {
+ updateWorkflow();
+ } else {
+ addWorkflow();
+ }
+ }
+ }, [canDeploy, errorNode, definition?.isValid])
+
useEffect(() => {
enableGenerate(
(definition.isValid &&
diff --git a/keep-ui/app/workflows/builder/editors.tsx b/keep-ui/app/workflows/builder/editors.tsx
index 53283f26b..cb34b2c51 100644
--- a/keep-ui/app/workflows/builder/editors.tsx
+++ b/keep-ui/app/workflows/builder/editors.tsx
@@ -7,26 +7,25 @@ import {
Subtitle,
Icon,
Button,
+ Switch,
Divider,
} from "@tremor/react";
import { KeyIcon } from "@heroicons/react/20/solid";
import { Provider } from "app/providers/providers";
import {
BackspaceIcon,
- BellSnoozeIcon,
- ClockIcon,
FunnelIcon,
- HandRaisedIcon,
} from "@heroicons/react/24/outline";
+import React from "react";
import useStore, { V2Properties } from "./builder-store";
-import { useEffect, useState } from "react";
+import { useEffect, useRef, useState } from "react";
function EditorLayout({ children }: { children: React.ReactNode }) {
  return <div className="flex flex-col m-2.5">{children}</div>;
}
-export function GlobalEditorV2({synced}: {synced: boolean}) {
+export function GlobalEditorV2({ synced, saveRef }: { synced: boolean, saveRef: React.MutableRefObject<boolean> }) {
const { v2Properties: properties, updateV2Properties: setProperty, selectedNode } = useStore();
return (
@@ -45,6 +44,7 @@ export function GlobalEditorV2({synced}: {synced: boolean}) {
properties={properties}
setProperties={setProperty}
selectedNode={selectedNode}
+ saveRef={saveRef}
/>
);
@@ -58,7 +58,7 @@ interface keepEditorProps {
installedProviders?: Provider[] | null | undefined;
providerType?: string;
type?: string;
- isV2?:boolean
+ isV2?: boolean
}
function KeepStepEditor({
@@ -142,7 +142,7 @@ function KeepStepEditor({
placeholder="Enter provider name manually"
onChange={(e: any) => updateProperty("config", e.target.value)}
className="my-2.5"
- value={providerConfig}
+ value={providerConfig || ""}
error={
providerConfig !== "" &&
providerConfig !== undefined &&
@@ -151,14 +151,13 @@ function KeepStepEditor({
(p) => p.details?.name === providerConfig
) === undefined
}
- errorMessage={`${
- providerConfig && isThisProviderNeedsInstallation &&
- installedProviderByType?.find(
- (p) => p.details?.name === providerConfig
- ) === undefined
+ errorMessage={`${providerConfig && isThisProviderNeedsInstallation &&
+ installedProviderByType?.find(
+ (p) => p.details?.name === providerConfig
+ ) === undefined
? "Please note this provider is not installed and you'll need to install it before executing this workflow."
: ""
- }`}
+ }`}
/>
Provider Parameters
@@ -168,7 +167,7 @@ function KeepStepEditor({
placeholder="If Condition"
onValueChange={(value) => updateProperty("if", value)}
className="mb-2.5"
- value={properties?.if as string}
+          value={(properties?.if as string) || ""}
/>
{uniqueParams
@@ -186,7 +185,7 @@ function KeepStepEditor({
placeholder={key}
onChange={propertyChanged}
className="mb-2.5"
- value={currentPropertyValue}
+ value={currentPropertyValue || ""}
/>
);
@@ -258,19 +257,22 @@ function KeepForeachEditor({ properties, updateProperty }: keepEditorProps) {
function WorkflowEditorV2({
properties,
setProperties,
- selectedNode
+ selectedNode,
+ saveRef
}: {
properties: V2Properties;
setProperties: (updatedProperties: V2Properties) => void;
selectedNode: string | null;
+  saveRef: React.MutableRefObject<boolean>;
}) {
- const isTrigger = ['interval', 'manual', 'alert'].includes(selectedNode || '')
-
const updateAlertFilter = (filter: string, value: string) => {
const currentFilters = properties.alert || {};
const updatedFilters = { ...currentFilters, [filter]: value };
setProperties({ ...properties, alert: updatedFilters });
+ if (saveRef.current) {
+ saveRef.current = false
+ }
};
const addFilter = () => {
@@ -280,13 +282,25 @@ function WorkflowEditorV2({
}
};
-
const deleteFilter = (filter: string) => {
const currentFilters = { ...properties.alert };
delete currentFilters[filter];
setProperties({ ...properties, alert: currentFilters });
+ if (saveRef.current) {
+ saveRef.current = false
+ }
};
+ const handleChange = (key: string, value: string) => {
+ setProperties({
+ ...properties,
+ [key]: value,
+ });
+ if (saveRef.current) {
+ saveRef.current = false
+ }
+ }
+
const propertyKeys = Object.keys(properties).filter(
(k) => k !== "isLocked" && k !== "id"
);
@@ -295,121 +309,136 @@ function WorkflowEditorV2({
<>
      <Title>Workflow Settings</Title>
{propertyKeys.map((key, index) => {
- const isTrigger = ["manual", "alert", 'interval'].includes(key) ;
+ const isTrigger = ["manual", "alert", 'interval', 'incident'].includes(key);
renderDivider = isTrigger && key === selectedNode ? !renderDivider : false;
return (
-          <div key={index}>
-            {renderDivider && <Divider />}
-            {((key === selectedNode) || (!isTrigger)) && <Text className="capitalize">{key}</Text>}
-
-            {(() => {
-              switch (key) {
-                case "manual":
-                  return (
-                    selectedNode === "manual" && (
-                      <div key={key}>
-                        <input
-                          type="checkbox"
-                          checked={properties[key] === "true"}
-                          onChange={(e) =>
-                            setProperties({
-                              ...properties,
-                              [key]: e.target.checked ? "true" : "false",
-                            })
-                          }
-                          disabled={true}
-                        />
-                      </div>
-                    )
-                  );
-
-                case "alert":
-                  return (
-                    selectedNode === "alert" && (
-                      <>
-                        <div>
-                          <Button onClick={addFilter} size="xs" variant="light" color="gray" icon={FunnelIcon}>
-                            Add Filter
-                          </Button>
-                        </div>
-                        {properties.alert &&
-                          Object.keys(properties.alert as {}).map((filter) => {
-                            return (
-                              <>
-                                <Subtitle className="mt-2.5">{filter}</Subtitle>
-                                <div className="flex items-center mt-1" key={filter}>
-                                  <TextInput
-                                    key={filter}
-                                    placeholder={`Set alert ${filter}`}
-                                    onChange={(e: any) =>
-                                      updateAlertFilter(filter, e.target.value)
-                                    }
-                                    value={(properties.alert as any)[filter] as string}
-                                  />
-                                  <Icon
-                                    icon={BackspaceIcon}
-                                    className="cursor-pointer"
-                                    color="red"
-                                    onClick={() => deleteFilter(filter)}
-                                  />
-                                </div>
-                              </>
-                            );
-                          })}
-                      </>
-                    )
-                  );
-
-                case "interval":
-                  return (
-                    selectedNode === "interval" && (
-                      <TextInput
-                        placeholder={`Set the ${key}`}
-                        onChange={(e) =>
-                          setProperties({ ...properties, [key]: e.target.value })
-                        }
-                        value={properties[key] as string}
-                      />
-                    )
-                  );
-                case "disabled":
-                  return (
-                    <div key={key}>
-                      <input
-                        type="checkbox"
-                        checked={properties[key] === "true"}
-                        onChange={(e) =>
-                          setProperties({
-                            ...properties,
-                            [key]: e.target.checked ? "true" : "false",
-                          })
-                        }
-                      />
-                    </div>
-                  );
-                default:
-                  return (
-                    <TextInput
-                      placeholder={`Set the ${key}`}
-                      onChange={(e) =>
-                        setProperties({ ...properties, [key]: e.target.value })
-                      }
-                      value={properties[key] as string}
-                    />
-                  );
-              }
-            })()}
-          </div>
-        );
+          <div key={index}>
+            {renderDivider && <Divider />}
+            {((key === selectedNode) || (!isTrigger)) && <Text className="capitalize">{key}</Text>}
+
+            {(() => {
+              switch (key) {
+                case "manual":
+                  return (
+                    selectedNode === "manual" && (
+                      <div key={key}>
+                        <input
+                          type="checkbox"
+                          checked={properties[key] === "true"}
+                          onChange={(e) =>
+                            handleChange(key, e.target.checked ? "true" : "false")
+                          }
+                          disabled={true}
+                        />
+                      </div>
+                    )
+                  );
+
+                case "alert":
+                  return (
+                    selectedNode === "alert" && (
+                      <>
+                        <div>
+                          <Button onClick={addFilter} size="xs" variant="light" color="gray" icon={FunnelIcon}>
+                            Add Filter
+                          </Button>
+                        </div>
+                        {properties.alert &&
+                          Object.keys(properties.alert as {}).map((filter) => {
+                            return (
+                              <>
+                                <Subtitle className="mt-2.5">{filter}</Subtitle>
+                                <div className="flex items-center mt-1" key={filter}>
+                                  <TextInput
+                                    key={filter}
+                                    placeholder={`Set alert ${filter}`}
+                                    onChange={(e: any) =>
+                                      updateAlertFilter(filter, e.target.value)
+                                    }
+                                    value={((properties.alert as any)[filter] as string) || ""}
+                                  />
+                                  <Icon
+                                    icon={BackspaceIcon}
+                                    className="cursor-pointer"
+                                    color="red"
+                                    onClick={() => deleteFilter(filter)}
+                                  />
+                                </div>
+                              </>
+                            );
+                          })}
+                      </>
+                    )
+                  );
+
+                case "incident":
+                  return selectedNode === 'incident' && <>
+                    <Subtitle className="mt-2.5">Incident events</Subtitle>
+                    {Array("created", "updated", "deleted").map((event) =>
+                      <div key={`incident-${event}`} className="flex items-center gap-2">
+                        <Switch
+                          id={event}
+                          checked={properties.incident.events?.indexOf(event) > -1}
+                          onChange={() => {
+                            let events = properties.incident.events || [];
+                            if (events.indexOf(event) > -1) {
+                              events = (events as string[]).filter(e => e !== event)
+                              setProperties({ ...properties, [key]: {events: events } })
+                            } else {
+                              events.push(event);
+                              setProperties({ ...properties, [key]: {events: events} })
+                            }
+                          }}
+                          color={"orange"}
+                        />
+                        <label htmlFor={event} className="capitalize">{event}</label>
+                      </div>
+                    )}
+                  </>;
+                case "interval":
+                  return selectedNode === "interval" && (
+                    <TextInput
+                      placeholder={`Set the ${key}`}
+                      onChange={(e) =>
+                        handleChange(key, e.target.value)
+                      }
+                      value={(properties[key] as string) || ""}
+                    />);
+                case "disabled":
+                  return (
+                    <div key={key}>
+                      <input
+                        type="checkbox"
+                        checked={properties[key] === "true"}
+                        onChange={(e) =>
+                          handleChange(key, e.target.checked ? "true" : "false")
+                        }
+                      />
+                    </div>
+                  );
+                default:
+                  return (
+                    <TextInput
+                      placeholder={`Set the ${key}`}
+                      onChange={(e) =>
+                        handleChange(key, e.target.value)
+                      }
+                      value={(properties[key] as string) || ""}
+                    />
+                  );
+              }
+            })()}
+          </div>
+        );
-
      })}
    </>
);
@@ -420,25 +449,28 @@ function WorkflowEditorV2({
export function StepEditorV2({
providers,
installedProviders,
- setSynced
+ setSynced,
+ saveRef
}: {
providers: Provider[] | undefined | null;
installedProviders?: Provider[] | undefined | null;
- setSynced: (sync:boolean) => void;
+ setSynced: (sync: boolean) => void;
+  saveRef: React.MutableRefObject<boolean>;
}) {
- const [formData, setFormData] = useState<{ name?: string; properties?: V2Properties, type?:string }>({});
+ const [formData, setFormData] = useState<{ name?: string; properties?: V2Properties, type?: string }>({});
const {
selectedNode,
updateSelectedNodeData,
- setOpneGlobalEditor,
- getNodeById
+ getNodeById,
} = useStore();
+  const deployRef = useRef<HTMLInputElement>(null);
+
useEffect(() => {
if (selectedNode) {
const { data } = getNodeById(selectedNode) || {};
const { name, type, properties } = data || {};
- setFormData({ name, type , properties });
+ setFormData({ name, type, properties });
}
}, [selectedNode, getNodeById]);
@@ -457,6 +489,9 @@ export function StepEditorV2({
properties: { ...formData.properties, [key]: value },
});
setSynced(false);
+ if (saveRef.current) {
+ saveRef.current = false;
+ }
};
@@ -464,13 +499,17 @@ export function StepEditorV2({
// Finalize the changes before saving
updateSelectedNodeData('name', formData.name);
updateSelectedNodeData('properties', formData.properties);
+ setSynced(false);
+ if (saveRef && deployRef?.current?.checked) {
+ saveRef.current = true;
+ }
};
const type = formData ? formData.type?.includes("step-") || formData.type?.includes("action-") : "";
return (
- {providerType} Editor
+      {providerType} Editor
Unique Identifier
- {type && formData.properties ? (
+ {type && formData.properties ? (
) : null}
+      <div className="flex items-center gap-2 mt-2.5">
+        <label htmlFor="deploy">Deploy</label>
+        <input id="deploy" type="checkbox" ref={deployRef} defaultChecked />
+      </div>
- Save
+ Save & Deploy
);
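Reviewer note: "Save & Deploy" threads through two refs and the store. A toy model of the handshake as introduced across `editors.tsx`, `ReactFlowEditor.tsx`, and `builder.tsx` (types simplified; not the real store):

```typescript
type Ref<T> = { current: T };

// 1. StepEditorV2's save handler: arm saveRef when the Deploy box is checked.
function onSave(saveRef: Ref<boolean>, deployChecked: boolean) {
  if (deployChecked) saveRef.current = true;
}

// 2. ReactFlowEditor's new effect: once the store reports synced, flip
//    canDeploy and disarm the ref.
function onSyncTick(
  saveRef: Ref<boolean>,
  synced: boolean,
  setCanDeploy: (v: boolean) => void
) {
  if (saveRef.current && synced) {
    setCanDeploy(true);
    saveRef.current = false;
  }
}

// 3. builder.tsx's new effect then calls updateWorkflow()/addWorkflow() when
//    canDeploy is true, there is no errorNode, and the definition is valid.
```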
diff --git a/keep-ui/app/workflows/builder/utils.tsx b/keep-ui/app/workflows/builder/utils.tsx
index 4ac183c58..0f6309eba 100644
--- a/keep-ui/app/workflows/builder/utils.tsx
+++ b/keep-ui/app/workflows/builder/utils.tsx
@@ -69,6 +69,17 @@ export function getToolboxConfiguration(providers: Provider[]) {
}
},
},
+ {
+ type: "incident",
+ componentType: "trigger",
+ name: "Incident",
+ id: 'incident',
+ properties: {
+ incident: {
+ events: [],
+ }
+ },
+ },
],
},
{
@@ -298,6 +309,8 @@ export function parseWorkflow(
}, {});
} else if (currType === "manual") {
value = "true";
+ } else if (currType === "incident") {
+ value = {events: curr.events};
}
prev[currType] = value;
return prev;
@@ -508,7 +521,12 @@ export function buildAlert(definition: Definition): Alert {
value: alert.properties.interval,
});
}
-
+ if (alert.properties.incident) {
+ triggers.push({
+ type: "incident",
+ events: alert.properties.incident.events,
+ });
+ }
return {
id: alertId,
name: name,
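Reviewer note: the incident trigger now round-trips through `parseWorkflow` (YAML into properties) and `buildAlert` (properties into triggers). The emitted trigger entry has this shape, taken directly from the push above (the event list is an example):

```typescript
// What buildAlert() appends when properties.incident is set:
const trigger = {
  type: "incident",
  events: ["created", "updated"], // alert.properties.incident.events
};
```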
diff --git a/keep-ui/app/workflows/workflow-tile.tsx b/keep-ui/app/workflows/workflow-tile.tsx
index 73da76321..7e3da37ba 100644
--- a/keep-ui/app/workflows/workflow-tile.tsx
+++ b/keep-ui/app/workflows/workflow-tile.tsx
@@ -551,31 +551,32 @@ function WorkflowTile({ workflow }: { workflow: Workflow }) {
)}
{
- e.stopPropagation();
- e.preventDefault();
- if (workflow.id) {
- router.push(`/workflows/${workflow.id}`);
- }
- }}
+ className="relative flex flex-col justify-between bg-white rounded shadow p-2 h-full hover:border-orange-400 hover:border-2 overflow-hidden"
+ onClick={(e) => {
+ e.stopPropagation();
+ e.preventDefault();
+ if (workflow.id) {
+ router.push(`/workflows/${workflow.id}`);
+ }
+ }}
>
-
+
{workflow.provisioned && (
-
+
Provisioned
)}
- {!!handleRunClick && WorkflowMenuSection({
- onDelete: handleDeleteClick,
- onRun: handleRunClick,
- onDownload: handleDownloadClick,
- onView: handleViewClick,
- onBuilder: handleBuilderClick,
- runButtonToolTip: message,
- isRunButtonDisabled: !!isRunButtonDisabled,
- provisioned: workflow.provisioned,
- })}
+ {!!handleRunClick &&
+ WorkflowMenuSection({
+ onDelete: handleDeleteClick,
+ onRun: handleRunClick,
+ onDownload: handleDownloadClick,
+ onView: handleViewClick,
+ onBuilder: handleBuilderClick,
+ runButtonToolTip: message,
+ isRunButtonDisabled: !!isRunButtonDisabled,
+ provisioned: workflow.provisioned,
+ })}
@@ -634,7 +635,7 @@ function WorkflowTile({ workflow }: { workflow: Workflow }) {
className="object-cover"
/>
) : (
-
+
)}
Trigger
@@ -684,9 +685,9 @@ function WorkflowTile({ workflow }: { workflow: Workflow }) {
-      {!!getTriggerModalProps && <AlertTriggerModal {...getTriggerModalProps()} />}
+      {!!getTriggerModalProps && (
+        <AlertTriggerModal {...getTriggerModalProps()} />
+      )}
{
@@ -760,7 +761,6 @@ export function WorkflowTileOld({ workflow }: { workflow: Workflow }) {
setFormErrors(updatedFormErrors);
};
-
const handleDeleteClick = async () => {
try {
const response = await fetch(`${apiUrl}/workflows/${workflow.id}`, {
@@ -863,16 +863,17 @@ export function WorkflowTileOld({ workflow }: { workflow: Workflow }) {
{workflow.name}
- {!!handleRunClick && WorkflowMenuSection({
- onDelete: handleDeleteClick,
- onRun: handleRunClick,
- onDownload: handleDownloadClick,
- onView: handleViewClick,
- onBuilder: handleBuilderClick,
- runButtonToolTip: message,
- isRunButtonDisabled: !!isRunButtonDisabled,
- provisioned: workflow.provisioned,
- })}
+ {!!handleRunClick &&
+ WorkflowMenuSection({
+ onDelete: handleDeleteClick,
+ onRun: handleRunClick,
+ onDownload: handleDownloadClick,
+ onView: handleViewClick,
+ onBuilder: handleBuilderClick,
+ runButtonToolTip: message,
+ isRunButtonDisabled: !!isRunButtonDisabled,
+ provisioned: workflow.provisioned,
+ })}
@@ -920,9 +921,7 @@ export function WorkflowTileOld({ workflow }: { workflow: Workflow }) {
Disabled
-
- {workflow?.disabled?.toString()}
-
+ {workflow?.disabled?.toString()}
@@ -1023,9 +1022,9 @@ export function WorkflowTileOld({ workflow }: { workflow: Workflow }) {
)}
-      {!!getTriggerModalProps && <AlertTriggerModal {...getTriggerModalProps()} />}
+      {!!getTriggerModalProps && (
+        <AlertTriggerModal {...getTriggerModalProps()} />
+      )}
);
}
diff --git a/keep-ui/package-lock.json b/keep-ui/package-lock.json
index 399070b34..a0e3da6b4 100644
--- a/keep-ui/package-lock.json
+++ b/keep-ui/package-lock.json
@@ -231,7 +231,7 @@
"mz": "^2.7.0",
"nanoid": "^3.3.6",
"natural-compare": "^1.4.0",
- "next": "^14.2.1",
+ "next": "^14.2.12",
"next-auth": "^4.24.7",
"node-releases": "^2.0.10",
"normalize-path": "^3.0.0",
@@ -272,7 +272,7 @@
"postcss-nested": "^6.0.1",
"postcss-selector-parser": "^6.0.12",
"postcss-value-parser": "^4.2.0",
- "posthog-js": "^1.157.2",
+ "posthog-js": "^1.161.6",
"posthog-node": "^3.1.1",
"preact-render-to-string": "^5.2.6",
"prelude-ls": "^1.2.1",
@@ -372,6 +372,7 @@
},
"devDependencies": {
"@tailwindcss/typography": "^0.5.12",
+ "@types/d3-time-format": "^4.0.3",
"@types/js-cookie": "^3.0.3",
"@types/js-yaml": "^4.0.5",
"@types/json-logic-js": "^2.0.7",
@@ -3062,9 +3063,9 @@
"integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg=="
},
"node_modules/@next/env": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.1.tgz",
- "integrity": "sha512-qsHJle3GU3CmVx7pUoXcghX4sRN+vINkbLdH611T8ZlsP//grzqVW87BSUgOZeSAD4q7ZdZicdwNe/20U2janA=="
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.12.tgz",
+ "integrity": "sha512-3fP29GIetdwVIfIRyLKM7KrvJaqepv+6pVodEbx0P5CaMLYBtx+7eEg8JYO5L9sveJO87z9eCReceZLi0hxO1Q=="
},
"node_modules/@next/eslint-plugin-next": {
"version": "14.2.1",
@@ -3118,9 +3119,9 @@
}
},
"node_modules/@next/swc-darwin-arm64": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.1.tgz",
- "integrity": "sha512-kGjnjcIJehEcd3rT/3NAATJQndAEELk0J9GmGMXHSC75TMnvpOhONcjNHbjtcWE5HUQnIHy5JVkatrnYm1QhVw==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.12.tgz",
+ "integrity": "sha512-crHJ9UoinXeFbHYNok6VZqjKnd8rTd7K3Z2zpyzF1ch7vVNKmhjv/V7EHxep3ILoN8JB9AdRn/EtVVyG9AkCXw==",
"cpu": [
"arm64"
],
@@ -3133,9 +3134,9 @@
}
},
"node_modules/@next/swc-darwin-x64": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.1.tgz",
- "integrity": "sha512-dAdWndgdQi7BK2WSXrx4lae7mYcOYjbHJUhvOUnJjMNYrmYhxbbvJ2xElZpxNxdfA6zkqagIB9He2tQk+l16ew==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.12.tgz",
+ "integrity": "sha512-JbEaGbWq18BuNBO+lCtKfxl563Uw9oy2TodnN2ioX00u7V1uzrsSUcg3Ep9ce+P0Z9es+JmsvL2/rLphz+Frcw==",
"cpu": [
"x64"
],
@@ -3148,9 +3149,9 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.1.tgz",
- "integrity": "sha512-2ZctfnyFOGvTkoD6L+DtQtO3BfFz4CapoHnyLTXkOxbZkVRgg3TQBUjTD/xKrO1QWeydeo8AWfZRg8539qNKrg==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.12.tgz",
+ "integrity": "sha512-qBy7OiXOqZrdp88QEl2H4fWalMGnSCrr1agT/AVDndlyw2YJQA89f3ttR/AkEIP9EkBXXeGl6cC72/EZT5r6rw==",
"cpu": [
"arm64"
],
@@ -3163,9 +3164,9 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.1.tgz",
- "integrity": "sha512-jazZXctiaanemy4r+TPIpFP36t1mMwWCKMsmrTRVChRqE6putyAxZA4PDujx0SnfvZHosjdkx9xIq9BzBB5tWg==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.12.tgz",
+ "integrity": "sha512-EfD9L7o9biaQxjwP1uWXnk3vYZi64NVcKUN83hpVkKocB7ogJfyH2r7o1pPnMtir6gHZiGCeHKagJ0yrNSLNHw==",
"cpu": [
"arm64"
],
@@ -3178,9 +3179,9 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.1.tgz",
- "integrity": "sha512-VjCHWCjsAzQAAo8lkBOLEIkBZFdfW+Z18qcQ056kL4KpUYc8o59JhLDCBlhg+hINQRgzQ2UPGma2AURGOH0+Qg==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.12.tgz",
+ "integrity": "sha512-iQ+n2pxklJew9IpE47hE/VgjmljlHqtcD5UhZVeHICTPbLyrgPehaKf2wLRNjYH75udroBNCgrSSVSVpAbNoYw==",
"cpu": [
"x64"
],
@@ -3193,9 +3194,9 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.1.tgz",
- "integrity": "sha512-7HZKYKvAp4nAHiHIbY04finRqjeYvkITOGOurP1aLMexIFG/1+oCnqhGogBdc4lao/lkMW1c+AkwWSzSlLasqw==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.12.tgz",
+ "integrity": "sha512-rFkUkNwcQ0ODn7cxvcVdpHlcOpYxMeyMfkJuzaT74xjAa5v4fxP4xDk5OoYmPi8QNLDs3UgZPMSBmpBuv9zKWA==",
"cpu": [
"x64"
],
@@ -3208,9 +3209,9 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.1.tgz",
- "integrity": "sha512-YGHklaJ/Cj/F0Xd8jxgj2p8po4JTCi6H7Z3Yics3xJhm9CPIqtl8erlpK1CLv+HInDqEWfXilqatF8YsLxxA2Q==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.12.tgz",
+ "integrity": "sha512-PQFYUvwtHs/u0K85SG4sAdDXYIPXpETf9mcEjWc0R4JmjgMKSDwIU/qfZdavtP6MPNiMjuKGXHCtyhR/M5zo8g==",
"cpu": [
"arm64"
],
@@ -3223,9 +3224,9 @@
}
},
"node_modules/@next/swc-win32-ia32-msvc": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.1.tgz",
- "integrity": "sha512-o+ISKOlvU/L43ZhtAAfCjwIfcwuZstiHVXq/BDsZwGqQE0h/81td95MPHliWCnFoikzWcYqh+hz54ZB2FIT8RA==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.12.tgz",
+ "integrity": "sha512-FAj2hMlcbeCV546eU2tEv41dcJb4NeqFlSXU/xL/0ehXywHnNpaYajOUvn3P8wru5WyQe6cTZ8fvckj/2XN4Vw==",
"cpu": [
"ia32"
],
@@ -3238,9 +3239,9 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.1.tgz",
- "integrity": "sha512-GmRoTiLcvCLifujlisknv4zu9/C4i9r0ktsA8E51EMqJL4bD4CpO7lDYr7SrUxCR0tS4RVcrqKmCak24T0ohaw==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.12.tgz",
+ "integrity": "sha512-yu8QvV53sBzoIVRHsxCHqeuS8jYq6Lrmdh0briivuh+Brsp6xjg80MAozUsBTAV9KNmY08KlX0KYTWz1lbPzEg==",
"cpu": [
"x64"
],
@@ -4023,6 +4024,12 @@
"resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.3.tgz",
"integrity": "sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw=="
},
+ "node_modules/@types/d3-time-format": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz",
+ "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==",
+ "dev": true
+ },
"node_modules/@types/d3-timer": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz",
@@ -10722,11 +10729,11 @@
}
},
"node_modules/next": {
- "version": "14.2.1",
- "resolved": "https://registry.npmjs.org/next/-/next-14.2.1.tgz",
- "integrity": "sha512-SF3TJnKdH43PMkCcErLPv+x/DY1YCklslk3ZmwaVoyUfDgHKexuKlf9sEfBQ69w+ue8jQ3msLb+hSj1T19hGag==",
+ "version": "14.2.12",
+ "resolved": "https://registry.npmjs.org/next/-/next-14.2.12.tgz",
+ "integrity": "sha512-cDOtUSIeoOvt1skKNihdExWMTybx3exnvbFbb9ecZDIxlvIbREQzt9A5Km3Zn3PfU+IFjyYGsHS+lN9VInAGKA==",
"dependencies": {
- "@next/env": "14.2.1",
+ "@next/env": "14.2.12",
"@swc/helpers": "0.5.5",
"busboy": "1.6.0",
"caniuse-lite": "^1.0.30001579",
@@ -10741,15 +10748,15 @@
"node": ">=18.17.0"
},
"optionalDependencies": {
- "@next/swc-darwin-arm64": "14.2.1",
- "@next/swc-darwin-x64": "14.2.1",
- "@next/swc-linux-arm64-gnu": "14.2.1",
- "@next/swc-linux-arm64-musl": "14.2.1",
- "@next/swc-linux-x64-gnu": "14.2.1",
- "@next/swc-linux-x64-musl": "14.2.1",
- "@next/swc-win32-arm64-msvc": "14.2.1",
- "@next/swc-win32-ia32-msvc": "14.2.1",
- "@next/swc-win32-x64-msvc": "14.2.1"
+ "@next/swc-darwin-arm64": "14.2.12",
+ "@next/swc-darwin-x64": "14.2.12",
+ "@next/swc-linux-arm64-gnu": "14.2.12",
+ "@next/swc-linux-arm64-musl": "14.2.12",
+ "@next/swc-linux-x64-gnu": "14.2.12",
+ "@next/swc-linux-x64-musl": "14.2.12",
+ "@next/swc-win32-arm64-msvc": "14.2.12",
+ "@next/swc-win32-ia32-msvc": "14.2.12",
+ "@next/swc-win32-x64-msvc": "14.2.12"
},
"peerDependencies": {
"@opentelemetry/api": "^1.1.0",
@@ -11592,10 +11599,9 @@
"integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="
},
"node_modules/posthog-js": {
- "version": "1.157.2",
- "resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.157.2.tgz",
- "integrity": "sha512-ATYKGs+Q51u26nHHhrhWNh1whqFm7j/rwQQYw+y6/YzNmRlo+YsqrGZji9nqXb9/4fo0ModDr+ZmuOI3hKkUXA==",
- "license": "MIT",
+ "version": "1.161.6",
+ "resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.161.6.tgz",
+ "integrity": "sha512-UO0z/YTuan55Kl5Yg9Xs5x1PKUkm2zGKUNPioznb4GLRcxFnLBkWoeKQXNro2YZsYJvK+MY8jlF3cdGa8BZ8/Q==",
"dependencies": {
"fflate": "^0.4.8",
"preact": "^10.19.3",
diff --git a/keep-ui/package.json b/keep-ui/package.json
index ea9f96480..e96b0ddee 100644
--- a/keep-ui/package.json
+++ b/keep-ui/package.json
@@ -232,7 +232,7 @@
"mz": "^2.7.0",
"nanoid": "^3.3.6",
"natural-compare": "^1.4.0",
- "next": "^14.2.1",
+ "next": "^14.2.12",
"next-auth": "^4.24.7",
"node-releases": "^2.0.10",
"normalize-path": "^3.0.0",
@@ -273,7 +273,7 @@
"postcss-nested": "^6.0.1",
"postcss-selector-parser": "^6.0.12",
"postcss-value-parser": "^4.2.0",
- "posthog-js": "^1.157.2",
+ "posthog-js": "^1.161.6",
"posthog-node": "^3.1.1",
"preact-render-to-string": "^5.2.6",
"prelude-ls": "^1.2.1",
@@ -373,6 +373,7 @@
},
"devDependencies": {
"@tailwindcss/typography": "^0.5.12",
+ "@types/d3-time-format": "^4.0.3",
"@types/js-cookie": "^3.0.3",
"@types/js-yaml": "^4.0.5",
"@types/json-logic-js": "^2.0.7",
diff --git a/keep-ui/tailwind.config.js b/keep-ui/tailwind.config.js
index 5d7f0c604..1646a014b 100644
--- a/keep-ui/tailwind.config.js
+++ b/keep-ui/tailwind.config.js
@@ -8,6 +8,10 @@ module.exports = {
darkMode: "class",
theme: {
extend: {
+ gridTemplateColumns: {
+ 20: "repeat(20, minmax(0, 1fr))",
+ 24: "repeat(24, minmax(0, 1fr))",
+ },
minHeight: {
"screen-minus-200": "calc(100vh - 200px)",
},
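Reviewer note: the two new `gridTemplateColumns` entries enable `grid-cols-20`/`grid-cols-24` utilities. A hypothetical usage (the layout shown is illustrative, not from this PR):

```tsx
// A 24-column grid with a 20/4 split using the new utilities.
export function SplitGrid() {
  return (
    <div className="grid grid-cols-24 gap-2">
      <section className="col-span-20">main content</section>
      <aside className="col-span-4">sidebar</aside>
    </div>
  );
}
```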
diff --git a/keep-ui/utils/hooks/useAlerts.ts b/keep-ui/utils/hooks/useAlerts.ts
index 53b343b93..23d73139c 100644
--- a/keep-ui/utils/hooks/useAlerts.ts
+++ b/keep-ui/utils/hooks/useAlerts.ts
@@ -6,6 +6,15 @@ import { getApiURL } from "utils/apiUrl";
import { fetcher } from "utils/fetcher";
import { toDateObjectWithFallback } from "utils/helpers";
+export type AuditEvent = {
+ id: string;
+ user_id: string;
+ action: string;
+ description: string;
+ timestamp: string;
+ fingerprint: string;
+};
+
export const useAlerts = () => {
const apiUrl = getApiURL();
const { data: session } = useSession();
@@ -33,7 +42,8 @@ export const useAlerts = () => {
options: SWRConfiguration = { revalidateOnFocus: false }
) => {
    return useSWR<AlertDto[]>(
- () => (session && presetName ? `${apiUrl}/preset/${presetName}/alerts` : null),
+ () =>
+ session && presetName ? `${apiUrl}/preset/${presetName}/alerts` : null,
(url) => fetcher(url, session?.accessToken),
options
);
@@ -78,12 +88,32 @@ export const useAlerts = () => {
};
};
+ const useMultipleFingerprintsAlertAudit = (
+ fingerprints: string[] | undefined,
+ options: SWRConfiguration = { revalidateOnFocus: true }
+ ) => {
+    return useSWR<AuditEvent[]>(
+ () => (session && fingerprints ? `${apiUrl}/alerts/audit` : null),
+ (url) =>
+ fetcher(url, session?.accessToken, {
+ method: "POST",
+ body: JSON.stringify(fingerprints),
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${session?.accessToken}`,
+ },
+ }),
+ options
+ );
+ };
+
const useAlertAudit = (
fingerprint: string,
options: SWRConfiguration = { revalidateOnFocus: false }
) => {
- return useSWR(
- () => (session && fingerprint ? `${apiUrl}/alerts/${fingerprint}/audit` : null),
+    return useSWR<AuditEvent[]>(
+ () =>
+ session && fingerprint ? `${apiUrl}/alerts/${fingerprint}/audit` : null,
(url) => fetcher(url, session?.accessToken),
options
);
@@ -93,6 +123,7 @@ export const useAlerts = () => {
useAlertHistory,
useAllAlerts,
usePresetAlerts,
- useAlertAudit
+ useAlertAudit,
+ useMultipleFingerprintsAlertAudit,
};
};
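Reviewer note: a hedged usage sketch of the new multi-fingerprint audit hook (the component and fingerprints are placeholders; the ordering comment reflects the backend change in `db.py` below):

```tsx
import { useAlerts, AuditEvent } from "utils/hooks/useAlerts";

// Illustrative component: POSTs the fingerprint list to /alerts/audit
// and revalidates on focus, per the hook's default options.
function IncidentAuditTrail({ fingerprints }: { fingerprints: string[] }) {
  const { useMultipleFingerprintsAlertAudit } = useAlerts();
  const { data: auditEvents } = useMultipleFingerprintsAlertAudit(fingerprints);
  // Entries arrive newest-first: order_by(desc(timestamp), fingerprint).
  return (
    <ul>
      {auditEvents?.map((e: AuditEvent) => (
        <li key={e.id}>{e.description}</li>
      ))}
    </ul>
  );
}
```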
diff --git a/keep-ui/utils/hooks/useIncidents.ts b/keep-ui/utils/hooks/useIncidents.ts
index e263da38c..63ec605df 100644
--- a/keep-ui/utils/hooks/useIncidents.ts
+++ b/keep-ui/utils/hooks/useIncidents.ts
@@ -1,4 +1,4 @@
-import {IncidentDto, PaginatedIncidentAlertsDto, PaginatedIncidentsDto} from "app/incidents/model";
+import {IncidentDto, PaginatedIncidentAlertsDto, PaginatedIncidentsDto} from "../../app/incidents/models";
import { useSession } from "next-auth/react";
import useSWR, { SWRConfiguration } from "swr";
import { getApiURL } from "utils/apiUrl";
diff --git a/keep-ui/utils/hooks/useTopology.ts b/keep-ui/utils/hooks/useTopology.ts
index 5286f1cc0..372a9e0ef 100644
--- a/keep-ui/utils/hooks/useTopology.ts
+++ b/keep-ui/utils/hooks/useTopology.ts
@@ -5,7 +5,7 @@ import useSWR from "swr";
import { getApiURL } from "utils/apiUrl";
import { fetcher } from "utils/fetcher";
import { useWebsocket } from "./usePusher";
-import { useCallback, useEffect } from "react";
+import { useCallback, useEffect, useState } from "react";
import { toast } from "react-toastify";
import { useApplications } from "./useApplications";
@@ -59,8 +59,7 @@ export const useTopology = (
environment?: string
) => {
const { data: session } = useSession();
-
- useTopologyPolling();
+ const { data: pollTopology } = useTopologyPolling();
const url = buildTopologyUrl({ session, providerId, service, environment });
@@ -71,6 +70,12 @@ export const useTopology = (
const { applications } = useApplications();
+ useEffect(() => {
+ if (pollTopology) {
+ mutate();
+ }
+ }, [pollTopology, mutate]);
+
// TODO: remove once endpoint returns application data
if (data) {
const dataWithApplications = data.map((service) => {
@@ -101,12 +106,14 @@ export const useTopology = (
export const useTopologyPolling = () => {
const { bind, unbind } = useWebsocket();
+ const [pollTopology, setPollTopology] = useState(0);
const handleIncoming = useCallback((data: TopologyUpdate) => {
toast.success(
`Topology pulled from ${data.providerId} (${data.providerType})`,
{ position: "top-right" }
);
+ setPollTopology(Math.floor(Math.random() * 10000));
}, []);
useEffect(() => {
@@ -115,4 +122,6 @@ export const useTopologyPolling = () => {
unbind("topology-update", handleIncoming);
};
}, [bind, unbind, handleIncoming]);
+
+ return { data: pollTopology };
};
diff --git a/keep-ui/utils/reactFlow.ts b/keep-ui/utils/reactFlow.ts
index 7888f5541..3356112e2 100644
--- a/keep-ui/utils/reactFlow.ts
+++ b/keep-ui/utils/reactFlow.ts
@@ -460,7 +460,7 @@ export function getTriggerStep(properties: V2Properties) {
}
Object.keys(properties).forEach((key) => {
- if (['interval', 'manual', 'alert'].includes(key) && properties[key]) {
+ if (['interval', 'manual', 'alert', 'incident'].includes(key) && properties[key]) {
_steps.push({
id: key,
type: key,
diff --git a/keep/api/api.py b/keep/api/api.py
index bed995b86..86c3445ea 100644
--- a/keep/api/api.py
+++ b/keep/api/api.py
@@ -323,7 +323,7 @@ async def catch_exception(request: Request, exc: Exception):
)
@app.middleware("http")
- async def log_middeware(request: Request, call_next):
+ async def log_middleware(request: Request, call_next):
identity = _extract_identity(request, attribute="keep_tenant_id")
logger.info(
f"Request started: {request.method} {request.url.path}",
diff --git a/keep/api/arq_worker.py b/keep/api/arq_worker.py
index 1e0667888..29d9af3f1 100644
--- a/keep/api/arq_worker.py
+++ b/keep/api/arq_worker.py
@@ -63,8 +63,7 @@
FUNCTIONS: list = (
[
- import_string(background_function)
- for background_function in list(ARQ_BACKGROUND_FUNCTIONS)
+ import_string(background_function) for background_function in list(ARQ_BACKGROUND_FUNCTIONS)
]
if ARQ_BACKGROUND_FUNCTIONS is not None
else list()
@@ -86,13 +85,15 @@ def get_arq_worker(queue_name: str) -> Worker:
expires = config(
"ARQ_EXPIRES", cast=int, default=3600
) # the default length of time from when a job is expected to start after which the job expires, making it shorter to avoid clogging
-
+ expires_ai = config(
+ "ARQ_EXPIRES_AI", cast=int, default=3600*1000
+ )
# generate a worker id so each worker will have a different health check key
worker_id = str(uuid4()).replace("-", "")
worker = create_worker(
WorkerSettings,
keep_result=keep_result,
- expires_extra_ms=expires,
+ expires_extra_ms=expires_ai if KEEP_ARQ_TASK_POOL == KEEP_ARQ_TASK_POOL_AI else expires,
queue_name=queue_name,
health_check_key=f"{queue_name}:{worker_id}:health-check",
)
diff --git a/keep/api/bl/enrichments_bl.py b/keep/api/bl/enrichments_bl.py
index 3cec23e0e..d8f18691c 100644
--- a/keep/api/bl/enrichments_bl.py
+++ b/keep/api/bl/enrichments_bl.py
@@ -278,7 +278,10 @@ def _check_alert_matches_rule(self, alert: AlertDto, rule: MappingRule) -> bool:
)
if not topology_service:
- self.logger.warning("No topology service found to match on")
+ self.logger.debug(
+ "No topology service found to match on",
+ extra={"matcher_value": matcher_value},
+ )
else:
enrichments = topology_service.dict(exclude_none=True)
# Remove redundant fields
diff --git a/keep/api/core/db.py b/keep/api/core/db.py
index f8ea824b0..89c550c6d 100644
--- a/keep/api/core/db.py
+++ b/keep/api/core/db.py
@@ -15,7 +15,6 @@
from uuid import uuid4
import numpy as np
-import pandas as pd
import validators
from dotenv import find_dotenv, load_dotenv
from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor
@@ -38,7 +37,6 @@
from keep.api.models.db.preset import * # pylint: disable=unused-wildcard-import
from keep.api.models.db.provider import * # pylint: disable=unused-wildcard-import
from keep.api.models.db.rule import * # pylint: disable=unused-wildcard-import
-from keep.api.models.db.statistics import * # pylint: disable=unused-wildcard-import
from keep.api.models.db.tenant import * # pylint: disable=unused-wildcard-import
from keep.api.models.db.topology import * # pylint: disable=unused-wildcard-import
from keep.api.models.db.workflow import * # pylint: disable=unused-wildcard-import
@@ -110,6 +108,7 @@ def create_workflow_execution(
event_id: str = None,
fingerprint: str = None,
execution_id: str = None,
+ event_type: str = "alert",
) -> str:
with Session(engine) as session:
try:
@@ -128,13 +127,21 @@ def create_workflow_execution(
# Ensure the object has an id
session.flush()
execution_id = workflow_execution.id
- if fingerprint:
+ if fingerprint and event_type == "alert":
workflow_to_alert_execution = WorkflowToAlertExecution(
workflow_execution_id=execution_id,
alert_fingerprint=fingerprint,
event_id=event_id,
)
session.add(workflow_to_alert_execution)
+ elif event_type == "incident":
+ workflow_to_incident_execution = WorkflowToIncidentExecution(
+ workflow_execution_id=execution_id,
+ alert_fingerprint=fingerprint,
+ incident_id=event_id,
+ )
+ session.add(workflow_to_incident_execution)
+
session.commit()
return execution_id
except IntegrityError:
@@ -689,9 +696,8 @@ def get_workflow_executions(
).scalar()
avgDuration = avgDuration if avgDuration else 0.0
- query = (
- query.order_by(desc(WorkflowExecution.started)).limit(limit).offset(offset)
- )
+    query = query.order_by(desc(WorkflowExecution.started)).limit(limit).offset(offset)
# Execute the query
workflow_executions = query.all()
@@ -1077,6 +1083,7 @@ def query_alerts(
upper_timestamp=None,
lower_timestamp=None,
skip_alerts_with_null_timestamp=True,
+ sort_ascending=False,
) -> list[Alert]:
"""
Get all alerts for a given tenant_id.
@@ -1127,8 +1134,13 @@ def query_alerts(
if skip_alerts_with_null_timestamp:
query = query.filter(Alert.timestamp.isnot(None))
- # Order by timestamp in descending order and limit the results
- query = query.order_by(Alert.timestamp.desc()).limit(limit)
+ if sort_ascending:
+ query = query.order_by(Alert.timestamp.asc())
+ else:
+ query = query.order_by(Alert.timestamp.desc())
+
+ if limit:
+ query = query.limit(limit)
# Execute the query
alerts = query.all()
@@ -1738,7 +1750,6 @@ def update_key_last_used(
session.add(tenant_api_key_entry)
session.commit()
-
def get_linked_providers(tenant_id: str) -> List[Tuple[str, str, datetime]]:
with Session(engine) as session:
providers = (
@@ -2091,16 +2102,38 @@ def get_incidents(tenant_id) -> List[Incident]:
def get_alert_audit(
- tenant_id: str, fingerprint: str, limit: int = 50
+ tenant_id: str, fingerprint: str | list[str], limit: int = 50
) -> List[AlertAudit]:
+ """
+ Get the alert audit for the given fingerprint(s).
+
+ Args:
+ tenant_id (str): the tenant_id to filter the alert audit by
+ fingerprint (str | list[str]): the fingerprint(s) to filter the alert audit by
+ limit (int, optional): the maximum number of alert audits to return. Defaults to 50.
+
+ Returns:
+ List[AlertAudit]: the alert audit for the given fingerprint(s)
+ """
with Session(engine) as session:
- audit = session.exec(
- select(AlertAudit)
- .where(AlertAudit.tenant_id == tenant_id)
- .where(AlertAudit.fingerprint == fingerprint)
- .order_by(desc(AlertAudit.timestamp))
- .limit(limit)
- ).all()
+ if isinstance(fingerprint, list):
+ query = (
+ select(AlertAudit)
+ .where(AlertAudit.tenant_id == tenant_id)
+ .where(AlertAudit.fingerprint.in_(fingerprint))
+ .order_by(desc(AlertAudit.timestamp), AlertAudit.fingerprint)
+ )
+ if limit:
+ query = query.limit(limit)
+ audit = session.exec(query).all()
+ else:
+ audit = session.exec(
+ select(AlertAudit)
+ .where(AlertAudit.tenant_id == tenant_id)
+ .where(AlertAudit.fingerprint == fingerprint)
+ .order_by(desc(AlertAudit.timestamp))
+ .limit(limit)
+ ).all()
return audit
@@ -2168,6 +2201,7 @@ def get_last_incidents(
is_confirmed: bool = False,
sorting: Optional[IncidentSorting] = IncidentSorting.creation_time,
with_alerts: bool = False,
+ is_predicted: bool = None,
) -> Tuple[list[Incident], int]:
"""
Get the last incidents and total amount of incidents.
@@ -2195,6 +2229,9 @@ def get_last_incidents(
if with_alerts:
query = query.options(joinedload(Incident.alerts))
+ if is_predicted is not None:
+ query = query.filter(Incident.is_predicted == is_predicted)
+
if timeframe:
query = query.filter(
Incident.start_time
@@ -2223,7 +2260,7 @@ def get_last_incidents(
return incidents, total_count
-def get_incident_by_id(tenant_id: str, incident_id: str | UUID) -> Optional[Incident]:
+def get_incident_by_id(tenant_id: str, incident_id: str | UUID, with_alerts: bool = False) -> Optional[Incident]:
with Session(engine) as session:
query = session.query(
Incident,
@@ -2231,6 +2268,8 @@ def get_incident_by_id(tenant_id: str, incident_id: str | UUID) -> Optional[Inci
Incident.tenant_id == tenant_id,
Incident.id == incident_id,
)
+ if with_alerts:
+ query = query.options(joinedload(Incident.alerts))
return query.first()
@@ -2335,7 +2374,7 @@ def get_incidents_count(
def get_incident_alerts_by_incident_id(
- tenant_id: str, incident_id: str, limit: int, offset: int
+ tenant_id: str, incident_id: str, limit: Optional[int] = None, offset: Optional[int] = None
) -> (List[Alert], int):
with Session(engine) as session:
query = (
@@ -2353,7 +2392,10 @@ def get_incident_alerts_by_incident_id(
total_count = query.count()
- return query.limit(limit).offset(offset).all(), total_count
+ if limit is not None and offset is not None:
+ query = query.limit(limit).offset(offset)
+
+ return query.all(), total_count
def get_alerts_data_for_incident(
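
Reviewer note: with `offset=0` a plain truthiness test would skip pagination entirely, which is why the guard above compares against `None`. A tiny standalone illustration:

```python
def paginate(rows, limit=None, offset=None):
    # Mirror of the guard above: offset=0 is a legitimate value,
    # so test against None rather than truthiness.
    if limit is not None and offset is not None:
        return rows[offset:offset + limit]
    return rows

rows = list(range(10))
assert paginate(rows, limit=3, offset=0) == [0, 1, 2]  # `if limit and offset` would return all 10
assert paginate(rows) == rows                          # no pagination requested
```
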
@@ -2416,50 +2458,53 @@ def inner(db_session: Session):
def add_alerts_to_incident_by_incident_id(
tenant_id: str, incident_id: str | UUID, alert_ids: List[UUID]
) -> Optional[Incident]:
+ logger.info(f"Adding alerts to incident {incident_id} in database, total {len(alert_ids)} alerts",
+ extra={"tags": {"tenant_id": tenant_id, "incident_id": incident_id}})
+
with Session(engine) as session:
- incident = session.exec(
- select(Incident).where(
- Incident.tenant_id == tenant_id,
- Incident.id == incident_id,
- )
- ).first()
+ query = select(Incident).where(
+ Incident.tenant_id == tenant_id,
+ Incident.id == incident_id,
+ )
+ incident = session.exec(query).first()
if not incident:
return None
- existed_alert_ids = session.exec(
- select(AlertToIncident.alert_id).where(
- AlertToIncident.tenant_id == tenant_id,
- AlertToIncident.incident_id == incident.id,
- col(AlertToIncident.alert_id).in_(alert_ids),
- )
- ).all()
+ # Use a set for faster membership checks
+ existing_alert_ids = set(
+ session.exec(
+ select(AlertToIncident.alert_id).where(
+ AlertToIncident.tenant_id == tenant_id,
+ AlertToIncident.incident_id == incident.id,
+ col(AlertToIncident.alert_id).in_(alert_ids),
+ )
+ ).all()
+ )
- new_alert_ids = [
- alert_id for alert_id in alert_ids if alert_id not in existed_alert_ids
- ]
+ new_alert_ids = [alert_id for alert_id in alert_ids if alert_id not in existing_alert_ids]
if not new_alert_ids:
return incident
alerts_data_for_incident = get_alerts_data_for_incident(new_alert_ids, session)
- incident.sources = list(
- set(incident.sources) | set(alerts_data_for_incident["sources"])
- )
- incident.affected_services = list(
- set(incident.affected_services) | set(alerts_data_for_incident["services"])
- )
+ incident.sources = list(set(incident.sources) | set(alerts_data_for_incident["sources"]))
+ incident.affected_services = list(set(incident.affected_services) | set(alerts_data_for_incident["services"]))
incident.alerts_count += alerts_data_for_incident["count"]
alert_to_incident_entries = [
- AlertToIncident(
- alert_id=alert_id, incident_id=incident.id, tenant_id=tenant_id
- )
+ AlertToIncident(alert_id=alert_id, incident_id=incident.id, tenant_id=tenant_id)
for alert_id in new_alert_ids
]
- session.bulk_save_objects(alert_to_incident_entries)
+ for idx, entry in enumerate(alert_to_incident_entries):
+ session.add(entry)
+ if (idx + 1) % 100 == 0:
+ logger.info(f"Added {idx + 1}/{len(alert_to_incident_entries)} alerts to incident {incident.id} in database",
+ extra={"tags": {"tenant_id": tenant_id, "incident_id": incident.id}})
+ session.commit()
+ session.flush()
started_at, last_seen_at = session.exec(
select(func.min(Alert.timestamp), func.max(Alert.timestamp))
@@ -2469,9 +2514,9 @@ def add_alerts_to_incident_by_incident_id(
AlertToIncident.incident_id == incident.id,
)
).one()
+
incident.start_time = started_at
incident.last_seen_time = last_seen_at
-
incident.severity = alerts_data_for_incident["max_severity"].order
session.add(incident)
@@ -2526,25 +2571,27 @@ def remove_alerts_to_incident_by_incident_id(
# checking if services of removed alerts are still presented in alerts
# which still assigned with the incident
- services_existed = session.exec(
- session.query(func.distinct(service_field))
+ existed_services_query = (
+ select(func.distinct(service_field))
.join(AlertToIncident, Alert.id == AlertToIncident.alert_id)
.filter(
AlertToIncident.incident_id == incident_id,
service_field.in_(alerts_data_for_incident["services"]),
)
- ).scalars()
+ )
+ services_existed = session.exec(existed_services_query)
# checking if sources (providers) of removed alerts are still presented in alerts
# which still assigned with the incident
- sources_existed = session.exec(
- session.query(col(Alert.provider_type).distinct())
+ existed_sources_query = (
+ select(col(Alert.provider_type).distinct())
.join(AlertToIncident, Alert.id == AlertToIncident.alert_id)
.filter(
AlertToIncident.incident_id == incident_id,
col(Alert.provider_type).in_(alerts_data_for_incident["sources"]),
)
- ).scalars()
+ )
+ sources_existed = session.exec(existed_sources_query)
# Making lists of services and sources to remove from the incident
services_to_remove = [
@@ -2658,78 +2705,6 @@ def write_pmi_matrix_to_temp_file(
return True
-def write_pmi_matrix_to_db(tenant_id: str, pmi_matrix_df: pd.DataFrame) -> bool:
- # TODO: add handlers for sequential launches
- with Session(engine) as session:
- pmi_entries_to_update = 0
- pmi_entries_to_insert = []
-
- # Query for existing entries to differentiate between updates and inserts
- existing_entries = session.query(PMIMatrix).filter_by(tenant_id=tenant_id).all()
- existing_entries_dict = {
- (entry.fingerprint_i, entry.fingerprint_j): entry
- for entry in existing_entries
- }
-
- for fingerprint_i in pmi_matrix_df.index:
- for fingerprint_j in pmi_matrix_df.columns:
- if pmi_matrix_df.at[fingerprint_i, fingerprint_j] == -100:
- continue
-
- pmi = float(pmi_matrix_df.at[fingerprint_i, fingerprint_j])
-
- pmi_entry = {
- "tenant_id": tenant_id,
- "fingerprint_i": fingerprint_i,
- "fingerprint_j": fingerprint_j,
- "pmi": pmi,
- }
-
- if (fingerprint_i, fingerprint_j) in existing_entries_dict:
- existed_entry = existing_entries_dict[
- (fingerprint_i, fingerprint_j)
- ]
- if existed_entry.pmi != pmi:
- session.execute(
- update(PMIMatrix)
- .where(
- PMIMatrix.fingerprint_i == fingerprint_i,
- PMIMatrix.fingerprint_j == fingerprint_j,
- PMIMatrix.tenant_id == tenant_id,
- )
- .values(pmi=pmi)
- )
- pmi_entries_to_update += 1
- else:
- pmi_entries_to_insert.append(pmi_entry)
-
- if pmi_entries_to_insert:
- session.bulk_insert_mappings(PMIMatrix, pmi_entries_to_insert)
-
- logger.info(
- f"PMI matrix for tenant {tenant_id} updated. {pmi_entries_to_update} entries updated, {len(pmi_entries_to_insert)} entries inserted",
- extra={"tenant_id": tenant_id},
- )
-
- session.commit()
-
- return True
-
-
-def get_pmi_value(
- tenant_id: str, fingerprint_i: str, fingerprint_j: str
-) -> Optional[float]:
- with Session(engine) as session:
- pmi_entry = session.exec(
- select(PMIMatrix)
- .where(PMIMatrix.tenant_id == tenant_id)
- .where(PMIMatrix.fingerprint_i == fingerprint_i)
- .where(PMIMatrix.fingerprint_j == fingerprint_j)
- ).first()
-
- return pmi_entry.pmi if pmi_entry else None
-
-
def get_pmi_values_from_temp_file(temp_dir: str) -> Tuple[np.array, Dict[str, int]]:
npzfile = np.load(f"{temp_dir}/pmi_matrix.npz", allow_pickle=True)
pmi_matrix = npzfile["pmi_matrix"]
@@ -2740,18 +2715,25 @@ def get_pmi_values_from_temp_file(temp_dir: str) -> Tuple[np.array, Dict[str, in
return pmi_matrix, fingerint2idx
-def get_pmi_values(
- tenant_id: str, fingerprints: List[str]
-) -> Dict[Tuple[str, str], Optional[float]]:
+def get_tenant_config(tenant_id: str) -> dict:
with Session(engine) as session:
- pmi_entries = session.exec(
- select(PMIMatrix).where(PMIMatrix.tenant_id == tenant_id)
- ).all()
+ tenant_data = session.exec(
+ select(Tenant)
+ .where(Tenant.id == tenant_id)
+ ).first()
+ return tenant_data.configuration if tenant_data else {}
- pmi_values = {
- (entry.fingerprint_i, entry.fingerprint_j): entry.pmi for entry in pmi_entries
- }
- return pmi_values
+
+def write_tenant_config(tenant_id: str, config: dict) -> Tenant:
+ with Session(engine) as session:
+ tenant_data = session.exec(
+ select(Tenant)
+ .where(Tenant.id == tenant_id)
+ ).first()
+ if not tenant_data:
+ raise ValueError(f"Tenant {tenant_id} not found")
+ tenant_data.configuration = config
+ session.commit()
+ session.refresh(tenant_data)
+ return tenant_data
def update_incident_summary(
@@ -2896,3 +2878,18 @@ def get_provider_by_name(tenant_id: str, provider_name: str) -> Provider:
.where(Provider.name == provider_name)
).first()
return provider
+
+
+def change_incident_status_by_id(tenant_id: str, incident_id: UUID | str, status: IncidentStatus) -> bool:
+ with Session(engine) as session:
+ stmt = (
+ update(Incident)
+ .where(
+ Incident.tenant_id == tenant_id,
+ Incident.id == incident_id,
+ )
+ .values(status=status.value)
+ )
+ updated = session.execute(stmt)
+ session.commit()
+ return updated.rowcount > 0
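
A hypothetical call site for the new helper (ids are placeholders); the boolean return lets callers distinguish a real update from a missing incident:

```python
from keep.api.core.db import change_incident_status_by_id
from keep.api.models.alert import IncidentStatus

incident_id = "c2509cb3-6168-4347-b83b-a41da9df2d5b"  # example id
if change_incident_status_by_id("tenant-1", incident_id, IncidentStatus.ACKNOWLEDGED):
    print("status updated")
else:
    print("no such incident for this tenant")
```
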
diff --git a/keep/api/core/db_on_start.py b/keep/api/core/db_on_start.py
index a39abf1e7..b65dc698d 100644
--- a/keep/api/core/db_on_start.py
+++ b/keep/api/core/db_on_start.py
@@ -150,6 +150,7 @@ def try_create_single_tenant(tenant_id: str) -> None:
pass
logger.info(f"Api key {api_key_name} provisioned")
logger.info("Api keys provisioned")
+
# commit the changes
session.commit()
logger.info("Single tenant created")
@@ -180,4 +181,4 @@ def migrate_db():
os.path.dirname(os.path.abspath(__file__)) + "/../models/db/migrations",
)
alembic.command.upgrade(config, "head")
- logger.info("Finished migrations")
+ logger.info("Finished migrations")
\ No newline at end of file
diff --git a/keep/api/models/alert.py b/keep/api/models/alert.py
index 472b8f465..bb4c97eb8 100644
--- a/keep/api/models/alert.py
+++ b/keep/api/models/alert.py
@@ -4,11 +4,11 @@
import logging
import uuid
from enum import Enum
-from typing import Any, Dict
+from typing import Any, Dict, List
from uuid import UUID
import pytz
-from pydantic import AnyHttpUrl, BaseModel, Extra, root_validator, validator
+from pydantic import AnyHttpUrl, BaseModel, Extra, root_validator, validator, PrivateAttr
logger = logging.getLogger(__name__)
@@ -92,6 +92,15 @@ class AlertStatus(Enum):
PENDING = "pending"
+class IncidentStatus(Enum):
+ # Active incident
+ FIRING = "firing"
+ # Incident has been resolved
+ RESOLVED = "resolved"
+ # Incident has been acknowledged but not resolved
+ ACKNOWLEDGED = "acknowledged"
+
+
class IncidentSeverity(SeverityBaseInterface):
CRITICAL = ("critical", 5)
HIGH = ("high", 4)
@@ -348,6 +357,7 @@ class Config:
"id": "c2509cb3-6168-4347-b83b-a41da9df2d5b",
"name": "Incident name",
"user_summary": "Keep: Incident description",
+ "status": "firing",
}
]
}
@@ -363,6 +373,7 @@ class IncidentDto(IncidentDtoIn):
alerts_count: int
alert_sources: list[str]
severity: IncidentSeverity
+ status: IncidentStatus = IncidentStatus.FIRING
assignee: str | None
services: list[str]
@@ -374,6 +385,8 @@ class IncidentDto(IncidentDtoIn):
rule_fingerprint: str | None
+ _tenant_id: str | None = PrivateAttr(default=None)
+
def __str__(self) -> str:
# Convert the model instance to a dictionary
model_dict = self.dict()
@@ -382,12 +395,40 @@ def __str__(self) -> str:
class Config:
extra = Extra.allow
schema_extra = IncidentDtoIn.Config.schema_extra
+ underscore_attrs_are_private = True
json_encoders = {
# Converts UUID to their values for JSON serialization
UUID: lambda v: str(v),
}
+ @property
+ def name(self):
+ return self.user_generated_name or self.ai_generated_name
+
+ @property
+ def alerts(self) -> List["AlertDto"]:
+ from keep.api.core.db import get_incident_alerts_by_incident_id
+ from keep.api.utils.enrichment_helpers import convert_db_alerts_to_dto_alerts
+ if not self._tenant_id:
+ return []
+ alerts, _ = get_incident_alerts_by_incident_id(self._tenant_id, str(self.id))
+ return convert_db_alerts_to_dto_alerts(alerts)
+
+ @root_validator(pre=True)
+ def set_default_values(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+ # Check and set default status
+ status = values.get("status")
+ try:
+ values["status"] = IncidentStatus(status)
+ except ValueError:
+ logging.warning(
+ f"Invalid status value: {status}, setting default.",
+ extra={"event": values},
+ )
+ values["status"] = IncidentStatus.FIRING
+ return values
+
@classmethod
def from_db_incident(cls, db_incident):
@@ -395,7 +436,7 @@ def from_db_incident(cls, db_incident):
if isinstance(db_incident.severity, int) \
else db_incident.severity
- return cls(
+ dto = cls(
id=db_incident.id,
user_generated_name=db_incident.user_generated_name,
ai_generated_name = db_incident.ai_generated_name,
@@ -410,7 +451,17 @@ def from_db_incident(cls, db_incident):
alerts_count=db_incident.alerts_count,
alert_sources=db_incident.sources,
severity=severity,
+ status=db_incident.status,
assignee=db_incident.assignee,
services=db_incident.affected_services,
rule_fingerprint=db_incident.rule_fingerprint,
)
+
+ # The tenant id is needed to lazily fetch the incident's alerts on demand
+ dto._tenant_id = db_incident.tenant_id
+ return dto
+
+
+class IncidentStatusChangeDto(BaseModel):
+ status: IncidentStatus
+ comment: str | None = None
\ No newline at end of file
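
The `pre=True` root validator above means unknown or missing status strings degrade to `FIRING` rather than failing DTO validation. A self-contained pydantic v1 mirror of that behavior (model name is illustrative):

```python
from enum import Enum
from typing import Any, Dict
from pydantic import BaseModel, root_validator

class IncidentStatus(Enum):
    FIRING = "firing"
    RESOLVED = "resolved"
    ACKNOWLEDGED = "acknowledged"

class IncidentModel(BaseModel):
    status: IncidentStatus = IncidentStatus.FIRING

    @root_validator(pre=True)
    def set_default_values(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        try:
            values["status"] = IncidentStatus(values.get("status"))
        except ValueError:
            values["status"] = IncidentStatus.FIRING  # unknown values fall back
        return values

assert IncidentModel(status="resolved").status is IncidentStatus.RESOLVED
assert IncidentModel(status="bogus").status is IncidentStatus.FIRING
```
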
diff --git a/keep/api/models/alert_audit.py b/keep/api/models/alert_audit.py
new file mode 100644
index 000000000..f03a5af67
--- /dev/null
+++ b/keep/api/models/alert_audit.py
@@ -0,0 +1,57 @@
+from datetime import datetime
+
+from pydantic import BaseModel
+
+from keep.api.models.db.alert import AlertActionType, AlertAudit
+
+
+class AlertAuditDto(BaseModel):
+ id: str
+ timestamp: datetime
+ fingerprint: str
+ action: AlertActionType
+ user_id: str
+ description: str
+
+ @classmethod
+ def from_orm(cls, alert_audit: AlertAudit) -> "AlertAuditDto":
+ return cls(
+ id=str(alert_audit.id),
+ timestamp=alert_audit.timestamp,
+ fingerprint=alert_audit.fingerprint,
+ action=alert_audit.action,
+ user_id=alert_audit.user_id,
+ description=alert_audit.description,
+ )
+
+ @classmethod
+ def from_orm_list(cls, alert_audits: list[AlertAudit]) -> list["AlertAuditDto"]:
+ grouped_events = []
+ previous_event = None
+ count = 1
+
+ for event in alert_audits:
+ # Check if the current event is similar to the previous event
+ if previous_event and (
+ event.user_id == previous_event.user_id
+ and event.action == previous_event.action
+ and event.description == previous_event.description
+ ):
+ # Increment the count if the events are similar
+ count += 1
+ else:
+ # If the events are not similar, append the previous event to the grouped events
+ if previous_event:
+ if count > 1:
+ previous_event.description += f" x{count}"
+ grouped_events.append(AlertAuditDto.from_orm(previous_event))
+ # Update the previous event to the current event and reset the count
+ previous_event = event
+ count = 1
+
+ # Add the last event to the grouped events
+ if previous_event:
+ if count > 1:
+ previous_event.description += f" x{count}"
+ grouped_events.append(AlertAuditDto.from_orm(previous_event))
+ return grouped_events
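
Because `from_orm_list` only merges *consecutive* duplicates, the timestamp ordering upstream is what makes the `xN` collapsing work. A stripped-down, runnable version of the logic:

```python
from types import SimpleNamespace

def group_consecutive(events):
    # Consecutive entries with the same user/action/description are folded
    # into a single entry whose description gains an " xN" suffix.
    grouped, previous, count = [], None, 1
    for event in events:
        if previous and (event.user_id, event.action, event.description) == (
            previous.user_id, previous.action, previous.description
        ):
            count += 1
        else:
            if previous:
                if count > 1:
                    previous.description += f" x{count}"
                grouped.append(previous)
            previous, count = event, 1
    if previous:
        if count > 1:
            previous.description += f" x{count}"
        grouped.append(previous)
    return grouped

events = [SimpleNamespace(user_id="u", action="comment", description="ping") for _ in range(3)]
assert [e.description for e in group_consecutive(events)] == ["ping x3"]
```
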
diff --git a/keep/api/models/db/alert.py b/keep/api/models/db/alert.py
index bfd5e46cf..4e26bd989 100644
--- a/keep/api/models/db/alert.py
+++ b/keep/api/models/db/alert.py
@@ -13,7 +13,7 @@
from keep.api.consts import RUNNING_IN_CLOUD_RUN
from keep.api.core.config import config
-from keep.api.models.alert import IncidentSeverity
+from keep.api.models.alert import IncidentSeverity, IncidentStatus
from keep.api.models.db.tenant import Tenant
db_connection_string = config("DATABASE_CONNECTION_STRING", default=None)
@@ -69,6 +69,7 @@ class Incident(SQLModel, table=True):
assignee: str | None
severity: int = Field(default=IncidentSeverity.CRITICAL.order)
+ status: str = Field(default=IncidentStatus.FIRING.value, index=True)
creation_time: datetime = Field(default_factory=datetime.utcnow)
diff --git a/keep/api/models/db/mapping.py b/keep/api/models/db/mapping.py
index 2081656ea..2f0e0ab79 100644
--- a/keep/api/models/db/mapping.py
+++ b/keep/api/models/db/mapping.py
@@ -2,6 +2,7 @@
from typing import Literal, Optional
from pydantic import BaseModel, validator
+from sqlalchemy import String
from sqlmodel import JSON, Column, Field, SQLModel
@@ -19,7 +20,14 @@ class MappingRule(SQLModel, table=True):
override: bool = Field(default=True)
condition: Optional[str] = Field(max_length=2000)
# The type of this mapping rule
- type: str = "csv"
+ type: str = Field(
+ sa_column=Column(
+ String(255),
+ name="type",
+ server_default="csv",
+ ),
+ max_length=255,
+ )
# The attributes to match against (e.g. ["service","region"])
matchers: list[str] = Field(sa_column=Column(JSON), nullable=False)
# The rows of the CSV file [{service: "service1", region: "region1", ...}, ...]
diff --git a/keep/api/models/db/migrations/versions/2024-07-25-17-13_67f1efb93c99.py b/keep/api/models/db/migrations/versions/2024-07-25-17-13_67f1efb93c99.py
index 7bae04f45..778276ddc 100644
--- a/keep/api/models/db/migrations/versions/2024-07-25-17-13_67f1efb93c99.py
+++ b/keep/api/models/db/migrations/versions/2024-07-25-17-13_67f1efb93c99.py
@@ -14,7 +14,7 @@
# revision identifiers, used by Alembic.
revision = "67f1efb93c99"
-down_revision = "dcbd2873dcfd"
+down_revision = "9ba0aeecd4d0"
branch_labels = None
depends_on = None
diff --git a/keep/api/models/db/migrations/versions/2024-07-28-16-24_8e5942040de6.py b/keep/api/models/db/migrations/versions/2024-07-28-16-24_8e5942040de6.py
index fd0c2dfbb..bad9ddebd 100644
--- a/keep/api/models/db/migrations/versions/2024-07-28-16-24_8e5942040de6.py
+++ b/keep/api/models/db/migrations/versions/2024-07-28-16-24_8e5942040de6.py
@@ -12,7 +12,7 @@
# revision identifiers, used by Alembic.
revision = "8e5942040de6"
-down_revision = "9ba0aeecd4d0"
+down_revision = "67f1efb93c99"
branch_labels = None
depends_on = None
diff --git a/keep/api/models/db/migrations/versions/2024-07-29-18-10_92f4f93f2140.py b/keep/api/models/db/migrations/versions/2024-07-29-18-10_92f4f93f2140.py
index 9287bdc35..2f87c8a83 100644
--- a/keep/api/models/db/migrations/versions/2024-07-29-18-10_92f4f93f2140.py
+++ b/keep/api/models/db/migrations/versions/2024-07-29-18-10_92f4f93f2140.py
@@ -12,7 +12,7 @@
# revision identifiers, used by Alembic.
revision = "92f4f93f2140"
-down_revision = "dcbd2873dcfd"
+down_revision = "c91b348b94f2"
branch_labels = None
depends_on = None
diff --git a/keep/api/models/db/migrations/versions/2024-08-08-13-55_42098785763c.py b/keep/api/models/db/migrations/versions/2024-08-08-13-55_42098785763c.py
deleted file mode 100644
index 583ae21f2..000000000
--- a/keep/api/models/db/migrations/versions/2024-08-08-13-55_42098785763c.py
+++ /dev/null
@@ -1,21 +0,0 @@
-"""Merging 2 heads
-
-Revision ID: 42098785763c
-Revises: 67f1efb93c99, 4147d9e706c0
-Create Date: 2024-08-08 13:55:55.191243
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = "42098785763c"
-down_revision = ("67f1efb93c99", "4147d9e706c0")
-branch_labels = None
-depends_on = None
-
-
-def upgrade() -> None:
- pass
-
-
-def downgrade() -> None:
- pass
diff --git a/keep/api/models/db/migrations/versions/2024-08-09-10-53_6e353161f5a8.py b/keep/api/models/db/migrations/versions/2024-08-09-10-53_6e353161f5a8.py
deleted file mode 100644
index 9a977d509..000000000
--- a/keep/api/models/db/migrations/versions/2024-08-09-10-53_6e353161f5a8.py
+++ /dev/null
@@ -1,21 +0,0 @@
-"""Merge
-
-Revision ID: 6e353161f5a8
-Revises: c91b348b94f2, 42098785763c
-Create Date: 2024-08-09 10:53:33.363763
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = "6e353161f5a8"
-down_revision = ("c91b348b94f2", "42098785763c")
-branch_labels = None
-depends_on = None
-
-
-def upgrade() -> None:
- pass
-
-
-def downgrade() -> None:
- pass
diff --git a/keep/api/models/db/migrations/versions/2024-08-11-17-38_9453855f3ba0.py b/keep/api/models/db/migrations/versions/2024-08-11-17-38_9453855f3ba0.py
index 87fd3d551..929635927 100644
--- a/keep/api/models/db/migrations/versions/2024-08-11-17-38_9453855f3ba0.py
+++ b/keep/api/models/db/migrations/versions/2024-08-11-17-38_9453855f3ba0.py
@@ -12,7 +12,7 @@
# revision identifiers, used by Alembic.
revision = "9453855f3ba0"
-down_revision = "42098785763c"
+down_revision = "4147d9e706c0"
branch_labels = None
depends_on = None
diff --git a/keep/api/models/db/migrations/versions/2024-08-11-19-45_005efc57cc1c.py b/keep/api/models/db/migrations/versions/2024-08-11-19-45_005efc57cc1c.py
deleted file mode 100644
index efeb9d42e..000000000
--- a/keep/api/models/db/migrations/versions/2024-08-11-19-45_005efc57cc1c.py
+++ /dev/null
@@ -1,21 +0,0 @@
-"""empty message
-
-Revision ID: 005efc57cc1c
-Revises: 9453855f3ba0, 6e353161f5a8
-Create Date: 2024-08-11 19:45:08.308034
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = "005efc57cc1c"
-down_revision = ("9453855f3ba0", "6e353161f5a8")
-branch_labels = None
-depends_on = None
-
-
-def upgrade() -> None:
- pass
-
-
-def downgrade() -> None:
- pass
diff --git a/keep/api/models/db/migrations/versions/2024-08-13-19-22_0832e0d9889a.py b/keep/api/models/db/migrations/versions/2024-08-13-19-22_0832e0d9889a.py
index 9be27e3b6..9738f8bb0 100644
--- a/keep/api/models/db/migrations/versions/2024-08-13-19-22_0832e0d9889a.py
+++ b/keep/api/models/db/migrations/versions/2024-08-13-19-22_0832e0d9889a.py
@@ -13,7 +13,7 @@
# revision identifiers, used by Alembic.
revision = "0832e0d9889a"
-down_revision = "005efc57cc1c"
+down_revision = "9453855f3ba0"
branch_labels = None
depends_on = None
diff --git a/keep/api/models/db/migrations/versions/2024-09-01-14-04_94886bc59c11.py b/keep/api/models/db/migrations/versions/2024-09-01-14-04_94886bc59c11.py
index f50fc7800..a45e6e58a 100644
--- a/keep/api/models/db/migrations/versions/2024-09-01-14-04_94886bc59c11.py
+++ b/keep/api/models/db/migrations/versions/2024-09-01-14-04_94886bc59c11.py
@@ -12,7 +12,7 @@
# revision identifiers, used by Alembic.
revision = "94886bc59c11"
-down_revision = "1c650a429672"
+down_revision = "7ed12220a0d3"
branch_labels = None
depends_on = None
diff --git a/keep/api/models/db/migrations/versions/2024-09-04-09-38_b30d2141e1cb.py b/keep/api/models/db/migrations/versions/2024-09-04-09-38_b30d2141e1cb.py
deleted file mode 100644
index efe165a13..000000000
--- a/keep/api/models/db/migrations/versions/2024-09-04-09-38_b30d2141e1cb.py
+++ /dev/null
@@ -1,21 +0,0 @@
-"""Merge migrations to resolve double-headed issue
-
-Revision ID: b30d2141e1cb
-Revises: 7ed12220a0d3, 49e7c02579db
-Create Date: 2024-09-04 09:38:33.869973
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = "b30d2141e1cb"
-down_revision = ("7ed12220a0d3", "49e7c02579db")
-branch_labels = None
-depends_on = None
-
-
-def upgrade() -> None:
- pass
-
-
-def downgrade() -> None:
- pass
diff --git a/keep/api/models/db/migrations/versions/2024-09-10-17-59_710b4ff1d19e.py b/keep/api/models/db/migrations/versions/2024-09-10-17-59_710b4ff1d19e.py
deleted file mode 100644
index c2a93173d..000000000
--- a/keep/api/models/db/migrations/versions/2024-09-10-17-59_710b4ff1d19e.py
+++ /dev/null
@@ -1,21 +0,0 @@
-"""empty message
-
-Revision ID: 710b4ff1d19e
-Revises: 1aacee84447e, b30d2141e1cb
-Create Date: 2024-09-10 17:59:56.210094
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = "710b4ff1d19e"
-down_revision = ("1aacee84447e", "b30d2141e1cb")
-branch_labels = None
-depends_on = None
-
-
-def upgrade() -> None:
- pass
-
-
-def downgrade() -> None:
- pass
diff --git a/keep/api/models/db/migrations/versions/2024-09-13-10-48_938b1aa62d5c.py b/keep/api/models/db/migrations/versions/2024-09-13-10-48_938b1aa62d5c.py
index d7cacc71a..72a8082fc 100644
--- a/keep/api/models/db/migrations/versions/2024-09-13-10-48_938b1aa62d5c.py
+++ b/keep/api/models/db/migrations/versions/2024-09-13-10-48_938b1aa62d5c.py
@@ -12,7 +12,7 @@
# revision identifiers, used by Alembic.
revision = "938b1aa62d5c"
-down_revision = "710b4ff1d19e"
+down_revision = "1aacee84447e"
branch_labels = None
depends_on = None
diff --git a/keep/api/models/db/migrations/versions/2024-09-17-23-30_c5443d9deb0f.py b/keep/api/models/db/migrations/versions/2024-09-17-23-30_c5443d9deb0f.py
new file mode 100644
index 000000000..9ecb5c1cc
--- /dev/null
+++ b/keep/api/models/db/migrations/versions/2024-09-17-23-30_c5443d9deb0f.py
@@ -0,0 +1,34 @@
+"""Add status to Incident model
+
+Revision ID: c5443d9deb0f
+Revises: 938b1aa62d5c
+Create Date: 2024-09-11 23:30:04.308017
+
+"""
+
+import sqlalchemy as sa
+import sqlmodel
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "c5443d9deb0f"
+down_revision = "938b1aa62d5c"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ with op.batch_alter_table("incident", schema=None) as batch_op:
+ batch_op.add_column(
+ sa.Column("status", sqlmodel.sql.sqltypes.AutoString(), nullable=False, default="firing",
+ server_default="firing")
+ )
+ batch_op.create_index(
+ batch_op.f("ix_incident_status"), ["status"], unique=False
+ )
+
+
+def downgrade() -> None:
+ with op.batch_alter_table("incident", schema=None) as batch_op:
+ batch_op.drop_index(batch_op.f("ix_incident_status"))
+ batch_op.drop_column("status")
diff --git a/keep/api/models/db/migrations/versions/2024-09-18-02-05_772790c2e50a.py b/keep/api/models/db/migrations/versions/2024-09-18-02-05_772790c2e50a.py
new file mode 100644
index 000000000..e04890ab3
--- /dev/null
+++ b/keep/api/models/db/migrations/versions/2024-09-18-02-05_772790c2e50a.py
@@ -0,0 +1,38 @@
+"""add WorkflowToIncidentExecution
+
+Revision ID: 772790c2e50a
+Revises: c5443d9deb0f
+Create Date: 2024-09-08 02:05:42.739163
+
+"""
+
+import sqlalchemy as sa
+import sqlmodel
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "772790c2e50a"
+down_revision = "c5443d9deb0f"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "workflowtoincidentexecution",
+ sa.Column("id", sa.Integer(), nullable=False),
+ sa.Column(
+ "workflow_execution_id", sqlmodel.sql.sqltypes.AutoString(), nullable=False
+ ),
+ sa.Column("incident_id", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["workflow_execution_id"],
+ ["workflowexecution.id"],
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("workflow_execution_id", "incident_id"),
+ )
+
+
+def downgrade() -> None:
+ op.drop_table("workflowtoincidentexecution")
diff --git a/keep/api/models/db/migrations/versions/2024-09-18-14-08_5d7ae55efc6a.py b/keep/api/models/db/migrations/versions/2024-09-18-14-08_5d7ae55efc6a.py
new file mode 100644
index 000000000..8f0ce6c47
--- /dev/null
+++ b/keep/api/models/db/migrations/versions/2024-09-18-14-08_5d7ae55efc6a.py
@@ -0,0 +1,38 @@
+"""mappingrule type default value
+
+Revision ID: 5d7ae55efc6a
+Revises: 772790c2e50a
+Create Date: 2024-09-18 14:08:49.363483
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "5d7ae55efc6a"
+down_revision = "772790c2e50a"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table("mappingrule", schema=None) as batch_op:
+ batch_op.alter_column(
+ "type",
+ existing_type=sa.VARCHAR(length=255),
+ nullable=False,
+ server_default="csv",
+ )
+
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table("mappingrule", schema=None) as batch_op:
+ batch_op.alter_column(
+ "type", existing_type=sa.VARCHAR(length=255), nullable=True
+ )
+ # ### end Alembic commands ###
diff --git a/keep/api/models/db/tenant.py b/keep/api/models/db/tenant.py
index d7a67e815..2ccf32833 100644
--- a/keep/api/models/db/tenant.py
+++ b/keep/api/models/db/tenant.py
@@ -36,4 +36,4 @@ class TenantInstallation(SQLModel, table=True):
tenant_id: str = Field(foreign_key="tenant.id")
bot_id: str
installed: bool = False
- tenant: Optional[Tenant] = Relationship(back_populates="installations")
+ tenant: Optional[Tenant] = Relationship(back_populates="installations")
\ No newline at end of file
diff --git a/keep/api/models/db/workflow.py b/keep/api/models/db/workflow.py
index 3426a9560..f243b51f0 100644
--- a/keep/api/models/db/workflow.py
+++ b/keep/api/models/db/workflow.py
@@ -53,6 +53,9 @@ class WorkflowExecution(SQLModel, table=True):
workflow_to_alert_execution: "WorkflowToAlertExecution" = Relationship(
back_populates="workflow_execution"
)
+ workflow_to_incident_execution: "WorkflowToIncidentExecution" = Relationship(
+ back_populates="workflow_execution"
+ )
class Config:
orm_mode = True
@@ -71,6 +74,18 @@ class WorkflowToAlertExecution(SQLModel, table=True):
)
+class WorkflowToIncidentExecution(SQLModel, table=True):
+ __table_args__ = (UniqueConstraint("workflow_execution_id", "incident_id"),)
+
+ # https://sqlmodel.tiangolo.com/tutorial/automatic-id-none-refresh/
+ id: Optional[int] = Field(primary_key=True, default=None)
+ workflow_execution_id: str = Field(foreign_key="workflowexecution.id")
+ incident_id: str | None
+ workflow_execution: WorkflowExecution = Relationship(
+ back_populates="workflow_to_incident_execution"
+ )
+
+
class WorkflowExecutionLog(SQLModel, table=True):
id: int = Field(default=None, primary_key=True)
workflow_execution_id: str = Field(foreign_key="workflowexecution.id")
diff --git a/keep/api/routes/alerts.py b/keep/api/routes/alerts.py
index f14b63f23..210d5adea 100644
--- a/keep/api/routes/alerts.py
+++ b/keep/api/routes/alerts.py
@@ -34,6 +34,7 @@
EnrichAlertRequestBody,
UnEnrichAlertRequestBody,
)
+from keep.api.models.alert_audit import AlertAuditDto
from keep.api.models.db.alert import AlertActionType
from keep.api.models.search_alert import SearchAlertsRequest
from keep.api.tasks.process_event_task import process_event
@@ -318,7 +319,11 @@ async def receive_generic_event(
description="Helper function to complete Netdata webhook challenge",
)
async def webhook_challenge():
- token = Request.query_params.get("token").encode("ascii")
+ try:
+ token = Request.query_params.get("token").encode("ascii")
+ except Exception as e:
+ logger.exception("Failed to get token", extra={"error": str(e)})
+ raise HTTPException(status_code=400, detail="Bad request: failed to get token")
KEY = "keep-netdata-webhook-integration"
# creates HMAC SHA-256 hash from incomming token and your consumer secret
@@ -424,6 +429,15 @@ def get_alert(
description="Enrich an alert",
)
def enrich_alert(
+ enrich_data: EnrichAlertRequestBody,
+ pusher_client: Pusher = Depends(get_pusher_client),
+ authenticated_entity: AuthenticatedEntity = Depends(
+ IdentityManagerFactory.get_auth_verifier(["write:alert"])
+ ),
+) -> dict[str, str]:
+ return _enrich_alert(
+ enrich_data,
+ pusher_client=pusher_client,
+ authenticated_entity=authenticated_entity,
+ )
+
+
+def _enrich_alert(
enrich_data: EnrichAlertRequestBody,
pusher_client: Pusher = Depends(get_pusher_client),
authenticated_entity: AuthenticatedEntity = Depends(
@@ -673,16 +687,49 @@ async def search_alerts(
raise HTTPException(status_code=500, detail="Failed to search alerts")
+@router.post(
+ "/audit",
+ description="Get alert timeline audit trail for multiple fingerprints",
+)
+def get_multiple_fingerprint_alert_audit(
+ fingerprints: list[str],
+ authenticated_entity: AuthenticatedEntity = Depends(
+ IdentityManagerFactory.get_auth_verifier(["read:alert"])
+ ),
+) -> list[AlertAuditDto]:
+ tenant_id = authenticated_entity.tenant_id
+ logger.info(
+ "Fetching alert audit",
+ extra={"fingerprints": fingerprints, "tenant_id": tenant_id},
+ )
+ alert_audit = get_alert_audit_db(tenant_id, fingerprints)
+
+ if not alert_audit:
+ raise HTTPException(status_code=404, detail="Alert not found")
+ grouped_events = []
+
+ # Group the results by fingerprint so consecutive duplicate events can be collapsed (x2, x3, etc.)
+ grouped_audit = {}
+ for audit in alert_audit:
+ if audit.fingerprint not in grouped_audit:
+ grouped_audit[audit.fingerprint] = []
+ grouped_audit[audit.fingerprint].append(audit)
+
+ for values in grouped_audit.values():
+ grouped_events.extend(AlertAuditDto.from_orm_list(values))
+ return grouped_events
+
+
@router.get(
"/{fingerprint}/audit",
- description="Get alert enrichment",
+ description="Get alert timeline audit trail",
)
def get_alert_audit(
fingerprint: str,
authenticated_entity: AuthenticatedEntity = Depends(
IdentityManagerFactory.get_auth_verifier(["read:alert"])
),
-):
+) -> list[AlertAuditDto]:
tenant_id = authenticated_entity.tenant_id
logger.info(
"Fetching alert audit",
@@ -695,29 +742,5 @@ def get_alert_audit(
if not alert_audit:
raise HTTPException(status_code=404, detail="Alert not found")
- grouped_events = []
- previous_event = None
- count = 1
-
- for event in alert_audit:
- if previous_event and (
- event.user_id == previous_event.user_id
- and event.action == previous_event.action
- and event.description == previous_event.description
- ):
- count += 1
- else:
- if previous_event:
- if count > 1:
- previous_event.description += f" x{count}"
- grouped_events.append(previous_event.dict())
- previous_event = event
- count = 1
-
- # Add the last event
- if previous_event:
- if count > 1:
- previous_event.description += f" x{count}"
- grouped_events.append(previous_event.dict())
-
+ grouped_events = AlertAuditDto.from_orm_list(alert_audit)
return grouped_events
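
A sketch of how a client might exercise the new batch audit route; the base URL, the `/alerts` prefix, and the bearer credential are assumptions:

```python
import requests

KEEP_API = "http://localhost:8080"               # assumption: local keep API
headers = {"Authorization": "Bearer <api-key>"}  # placeholder credential

# POST /alerts/audit takes a JSON list of fingerprints in the body
resp = requests.post(f"{KEEP_API}/alerts/audit", json=["fp-abc", "fp-def"], headers=headers)
resp.raise_for_status()
for entry in resp.json():
    print(entry["fingerprint"], entry["description"])
```
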
diff --git a/keep/api/routes/incidents.py b/keep/api/routes/incidents.py
index dd9a07f05..ca2bf87aa 100644
--- a/keep/api/routes/incidents.py
+++ b/keep/api/routes/incidents.py
@@ -21,10 +21,15 @@
get_incident_unique_fingerprint_count,
get_last_incidents,
remove_alerts_to_incident_by_incident_id,
- update_incident_from_dto_by_id, IncidentSorting,
+ update_incident_from_dto_by_id,
+ change_incident_status_by_id,
+ IncidentSorting,
)
from keep.api.core.dependencies import get_pusher_client
-from keep.api.models.alert import AlertDto, IncidentDto, IncidentDtoIn
+from keep.api.models.alert import (
+ AlertDto,
+ IncidentDto,
+ IncidentDtoIn,
+ IncidentStatusChangeDto,
+ IncidentStatus,
+ EnrichAlertRequestBody,
+)
+from keep.api.routes.alerts import _enrich_alert
from keep.api.utils.enrichment_helpers import convert_db_alerts_to_dto_alerts
from keep.api.utils.import_ee import mine_incidents_and_create_objects
from keep.api.utils.pagination import (
@@ -33,6 +38,7 @@
)
from keep.identitymanager.authenticatedentity import AuthenticatedEntity
from keep.identitymanager.identitymanagerfactory import IdentityManagerFactory
+from keep.workflowmanager.workflowmanager import WorkflowManager
router = APIRouter()
logger = logging.getLogger(__name__)
@@ -49,7 +55,6 @@
sys.path.insert(0, path_with_ee)
from ee.experimental.incident_utils import ( # noqa
ALGORITHM_VERBOSE_NAME,
- mine_incidents,
)
@@ -94,7 +99,7 @@ def __update_client_on_incident_change(
def create_incident_endpoint(
incident_dto: IncidentDtoIn,
authenticated_entity: AuthenticatedEntity = Depends(
- IdentityManagerFactory.get_auth_verifier(["read:alert"])
+ IdentityManagerFactory.get_auth_verifier(["write:incident"])
),
pusher_client: Pusher | None = Depends(get_pusher_client),
) -> IncidentDto:
@@ -114,6 +119,21 @@ def create_incident_endpoint(
},
)
__update_client_on_incident_change(pusher_client, tenant_id)
+
+ try:
+ workflow_manager = WorkflowManager.get_instance()
+ logger.info("Adding incident to the workflow manager queue")
+ workflow_manager.insert_incident(tenant_id, new_incident_dto, "created")
+ logger.info("Added incident to the workflow manager queue")
+ except Exception:
+ logger.exception(
+ "Failed to run workflows based on incident",
+ extra={
+ "incident_id": new_incident_dto.id,
+ "tenant_id": tenant_id
+ },
+ )
+
return new_incident_dto
@@ -168,7 +188,7 @@ def get_all_incidents(
def get_incident(
incident_id: str,
authenticated_entity: AuthenticatedEntity = Depends(
- IdentityManagerFactory.get_auth_verifier(["read:alert"])
+ IdentityManagerFactory.get_auth_verifier(["read:incident"])
),
) -> IncidentDto:
tenant_id = authenticated_entity.tenant_id
@@ -196,7 +216,7 @@ def update_incident(
incident_id: str,
updated_incident_dto: IncidentDtoIn,
authenticated_entity: AuthenticatedEntity = Depends(
- IdentityManagerFactory.get_auth_verifier(["read:alert"])
+ IdentityManagerFactory.get_auth_verifier(["write:incident"])
),
) -> IncidentDto:
tenant_id = authenticated_entity.tenant_id
@@ -215,7 +235,19 @@ def update_incident(
raise HTTPException(status_code=404, detail="Incident not found")
new_incident_dto = IncidentDto.from_db_incident(incident)
-
+ try:
+ workflow_manager = WorkflowManager.get_instance()
+ logger.info("Adding incident to the workflow manager queue")
+ workflow_manager.insert_incident(tenant_id, new_incident_dto, "updated")
+ logger.info("Added incident to the workflow manager queue")
+ except Exception:
+ logger.exception(
+ "Failed to run workflows based on incident",
+ extra={
+ "incident_id": new_incident_dto.id,
+ "tenant_id": tenant_id
+ },
+ )
return new_incident_dto
@@ -226,7 +258,7 @@ def update_incident(
def delete_incident(
incident_id: str,
authenticated_entity: AuthenticatedEntity = Depends(
- IdentityManagerFactory.get_auth_verifier(["read:alert"])
+ IdentityManagerFactory.get_auth_verifier(["write:incident"])
),
pusher_client: Pusher | None = Depends(get_pusher_client),
):
@@ -238,10 +270,30 @@ def delete_incident(
"tenant_id": tenant_id,
},
)
+
+ incident = get_incident_by_id(tenant_id=tenant_id, incident_id=incident_id)
+ if not incident:
+ raise HTTPException(status_code=404, detail="Incident not found")
+
+ incident_dto = IncidentDto.from_db_incident(incident)
+
deleted = delete_incident_by_id(tenant_id=tenant_id, incident_id=incident_id)
if not deleted:
raise HTTPException(status_code=404, detail="Incident not found")
__update_client_on_incident_change(pusher_client, tenant_id)
+ try:
+ workflow_manager = WorkflowManager.get_instance()
+ logger.info("Adding incident to the workflow manager queue")
+ workflow_manager.insert_incident(tenant_id, incident_dto, "deleted")
+ logger.info("Added incident to the workflow manager queue")
+ except Exception:
+ logger.exception(
+ "Failed to run workflows based on incident",
+ extra={
+ "incident_id": incident_dto.id,
+ "tenant_id": tenant_id
+ },
+ )
return Response(status_code=202)
@@ -254,7 +306,7 @@ def get_incident_alerts(
limit: int = 25,
offset: int = 0,
authenticated_entity: AuthenticatedEntity = Depends(
- IdentityManagerFactory.get_auth_verifier(["read:alert"])
+ IdentityManagerFactory.get_auth_verifier(["read:incident"])
),
) -> AlertPaginatedResultsDto:
tenant_id = authenticated_entity.tenant_id
@@ -306,7 +358,7 @@ async def add_alerts_to_incident(
incident_id: str,
alert_ids: List[UUID],
authenticated_entity: AuthenticatedEntity = Depends(
- IdentityManagerFactory.get_auth_verifier(["read:alert"])
+ IdentityManagerFactory.get_auth_verifier(["write:incident"])
),
pusher_client: Pusher | None = Depends(get_pusher_client),
):
@@ -325,6 +377,22 @@ async def add_alerts_to_incident(
add_alerts_to_incident_by_incident_id(tenant_id, incident_id, alert_ids)
__update_client_on_incident_change(pusher_client, tenant_id, incident_id)
+ incident_dto = IncidentDto.from_db_incident(incident)
+
+ try:
+ workflow_manager = WorkflowManager.get_instance()
+ logger.info("Adding incident to the workflow manager queue")
+ workflow_manager.insert_incident(tenant_id, incident_dto, "updated")
+ logger.info("Added incident to the workflow manager queue")
+ except Exception:
+ logger.exception(
+ "Failed to run workflows based on incident",
+ extra={
+ "incident_id": incident_dto.id,
+ "tenant_id": tenant_id
+ },
+ )
+
fingerprints_count = get_incident_unique_fingerprint_count(tenant_id, incident_id)
if (
@@ -360,7 +428,7 @@ def delete_alerts_from_incident(
incident_id: str,
alert_ids: List[UUID],
authenticated_entity: AuthenticatedEntity = Depends(
- IdentityManagerFactory.get_auth_verifier(["read:alert"])
+ IdentityManagerFactory.get_auth_verifier(["write:incident"])
),
):
tenant_id = authenticated_entity.tenant_id
@@ -387,33 +455,33 @@ def delete_alerts_from_incident(
)
def mine(
authenticated_entity: AuthenticatedEntity = Depends(
- IdentityManagerFactory.get_auth_verifier(["write:incidents"])
+ IdentityManagerFactory.get_auth_verifier(["write:incident"])
),
alert_lower_timestamp: datetime = None,
alert_upper_timestamp: datetime = None,
- use_n_historical_alerts: int = 10e10,
+ use_n_historical_alerts: int | None = None,
incident_lower_timestamp: datetime = None,
incident_upper_timestamp: datetime = None,
- use_n_hist_incidents: int = 10e10,
- pmi_threshold: float = 0.0,
- knee_threshold: float = 0.8,
- min_incident_size: int = 5,
- incident_similarity_threshold: float = 0.8,
+ use_n_historical_incidents: int | None = None,
+ pmi_threshold: float | None = None,
+ knee_threshold: float | None = None,
+ min_incident_size: int | None = None,
+ incident_similarity_threshold: float | None = None,
) -> dict:
result = asyncio.run(
mine_incidents_and_create_objects(
- None,
- authenticated_entity.tenant_id,
- alert_lower_timestamp,
- alert_upper_timestamp,
- use_n_historical_alerts,
- incident_lower_timestamp,
- incident_upper_timestamp,
- use_n_hist_incidents,
- pmi_threshold,
- knee_threshold,
- min_incident_size,
- incident_similarity_threshold,
+ ctx=None,
+ tenant_id=authenticated_entity.tenant_id,
+ alert_lower_timestamp=alert_lower_timestamp,
+ alert_upper_timestamp=alert_upper_timestamp,
+ use_n_historical_alerts=use_n_historical_alerts,
+ incident_lower_timestamp=incident_lower_timestamp,
+ incident_upper_timestamp=incident_upper_timestamp,
+ use_n_historical_incidents=use_n_historical_incidents,
+ pmi_threshold=pmi_threshold,
+ knee_threshold=knee_threshold,
+ min_incident_size=min_incident_size,
+ incident_similarity_threshold=incident_similarity_threshold,
)
)
return result
@@ -422,12 +490,13 @@ def mine(
@router.post(
"/{incident_id}/confirm",
description="Confirm predicted incident by id",
+ response_model=IncidentDto
)
def confirm_incident(
incident_id: str,
authenticated_entity: AuthenticatedEntity = Depends(
- IdentityManagerFactory.get_auth_verifier(["read:alert"])
- ),
+ IdentityManagerFactory.get_auth_verifier(["write:incident"])
+ )
) -> IncidentDto:
tenant_id = authenticated_entity.tenant_id
logger.info(
@@ -445,3 +514,50 @@ def confirm_incident(
new_incident_dto = IncidentDto.from_db_incident(incident)
return new_incident_dto
+
+@router.post(
+ "/{incident_id}/status",
+ description="Change incident status",
+ response_model=IncidentDto
+)
+def change_incident_status(
+ incident_id: str,
+ change: IncidentStatusChangeDto,
+ authenticated_entity: AuthenticatedEntity = Depends(
+ IdentityManagerFactory.get_auth_verifier(["write:incident"])
+ )
+) -> IncidentDto:
+ tenant_id = authenticated_entity.tenant_id
+ logger.info(
+ "Fetching incident",
+ extra={
+ "incident_id": incident_id,
+ "tenant_id": tenant_id,
+ },
+ )
+
+ with_alerts = change.status == IncidentStatus.RESOLVED
+ incident = get_incident_by_id(tenant_id, incident_id, with_alerts=with_alerts)
+ if not incident:
+ raise HTTPException(status_code=404, detail="Incident not found")
+
+ # We need to do something only if status really changed
+ if change.status != incident.status:
+ result = change_incident_status_by_id(tenant_id, incident_id, change.status)
+ if not result:
+ raise HTTPException(status_code=500, detail="Error changing incident status")
+ # TODO: save this change to the audit table together with the comment
+
+ if change.status == IncidentStatus.RESOLVED:
+ for alert in incident.alerts:
+ _enrich_alert(EnrichAlertRequestBody(
+ enrichments={"status": "resolved"},
+ fingerprint=alert.fingerprint
+ ), authenticated_entity=authenticated_entity)
+
+ incident.status = change.status
+
+ new_incident_dto = IncidentDto.from_db_incident(incident)
+
+ return new_incident_dto
\ No newline at end of file
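
And a hypothetical client call for the status route (base URL, `/incidents` prefix, and credentials are assumptions). Per the handler above, resolving an incident also enriches each attached alert to status "resolved":

```python
import requests

KEEP_API = "http://localhost:8080"  # assumption: local keep API
incident_id = "c2509cb3-6168-4347-b83b-a41da9df2d5b"  # example id from the schema

resp = requests.post(
    f"{KEEP_API}/incidents/{incident_id}/status",
    json={"status": "resolved", "comment": "fixed by rollback"},
    headers={"Authorization": "Bearer <api-key>"},  # placeholder credential
)
resp.raise_for_status()
assert resp.json()["status"] == "resolved"
```
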
diff --git a/keep/api/routes/preset.py b/keep/api/routes/preset.py
index 53fa09f85..45f46c983 100644
--- a/keep/api/routes/preset.py
+++ b/keep/api/routes/preset.py
@@ -102,6 +102,9 @@ def pull_data_from_providers(
f"Pulling alerts from provider {provider.type} ({provider.id})",
extra=extra,
)
+ # Save the last pull time up front so a failure while processing events does not leave this provider being pulled over and over again.
+ update_provider_last_pull_time(tenant_id=tenant_id, provider_id=provider.id)
+
provider_class = ProvidersFactory.get_provider(
context_manager=context_manager,
provider_id=provider.id,
@@ -121,7 +124,8 @@ def pull_data_from_providers(
logger.info("Pulling topology data", extra=extra)
topology_data = provider_class.pull_topology()
logger.info(
- "Pulling topology data finished, processing", extra=extra
+ "Pulling topology data finished, processing",
+ extra={**extra, "topology_length": len(topology_data)},
)
process_topology(
tenant_id, topology_data, provider.id, provider.type
@@ -155,9 +159,6 @@ def pull_data_from_providers(
f"Unknown error pulling from provider {provider.type} ({provider.id})",
extra=extra,
)
- finally:
- # Even if we failed at processing some event, lets save the last pull time to not iterate this process over and over again.
- update_provider_last_pull_time(tenant_id=tenant_id, provider_id=provider.id)
logger.info(
"Pulling data from providers completed",
extra={
diff --git a/keep/api/tasks/process_topology_task.py b/keep/api/tasks/process_topology_task.py
index 5d65de035..b681dcd54 100644
--- a/keep/api/tasks/process_topology_task.py
+++ b/keep/api/tasks/process_topology_task.py
@@ -72,7 +72,7 @@ def process_topology(
service_id = service_to_keep_service_id_map.get(service.service)
depends_on_service_id = service_to_keep_service_id_map.get(dependency)
if not service_id or not depends_on_service_id:
- logger.warning(
+ logger.debug(
"Found a dangling service, skipping",
extra={"service": service.service, "dependency": dependency},
)
diff --git a/keep/api/utils/enrichment_helpers.py b/keep/api/utils/enrichment_helpers.py
index 7085626c0..bd2baa38c 100644
--- a/keep/api/utils/enrichment_helpers.py
+++ b/keep/api/utils/enrichment_helpers.py
@@ -108,9 +108,7 @@ def convert_db_alerts_to_dto_alerts(alerts: list[Alert]) -> list[AlertDto]:
)
continue
- # include the db event id if it's not present
- if alert_dto.event_id is None:
- alert_dto.event_id = str(alert.id)
+ alert_dto.event_id = str(alert.id)
# enrich provider id when it's possible
if alert_dto.providerId is None:
diff --git a/keep/api/utils/import_ee.py b/keep/api/utils/import_ee.py
index 5d06ff228..de1742c1f 100644
--- a/keep/api/utils/import_ee.py
+++ b/keep/api/utils/import_ee.py
@@ -17,7 +17,8 @@
sys.path.insert(0, path_with_ee)
from ee.experimental.incident_utils import mine_incidents_and_create_objects, generate_update_incident_summary, generate_update_incident_name # noqa
- from ee.experimental.incident_utils import ALGORITHM_VERBOSE_NAME, SUMMARY_GENERATOR_VERBOSE_NAME, NAME_GENERATOR_VERBOSE_NAME # noqa
+ from ee.experimental.generative_utils import generate_incident_summary, generate_incident_name, SUMMARY_GENERATOR_VERBOSE_NAME, NAME_GENERATOR_VERBOSE_NAME # noqa
+ from ee.experimental.incident_utils import ALGORITHM_VERBOSE_NAME # noqa
else:
mine_incidents_and_create_objects = NotImplemented
generate_update_incident_summary = NotImplemented
diff --git a/keep/contextmanager/contextmanager.py b/keep/contextmanager/contextmanager.py
index 953a97757..2f5611fa7 100644
--- a/keep/contextmanager/contextmanager.py
+++ b/keep/contextmanager/contextmanager.py
@@ -22,6 +22,7 @@ def __init__(self, tenant_id, workflow_id=None, workflow_execution_id=None):
self.providers_context = {}
self.actions_context = {}
self.event_context = {}
+ self.incident_context = {}
self.foreach_context = {
"value": None,
}
@@ -78,6 +79,9 @@ def get_logger(self):
def set_event_context(self, event):
self.event_context = event
+ def set_incident_context(self, incident):
+ self.incident_context = incident
+
def get_workflow_id(self):
return self.workflow_id
@@ -104,6 +108,7 @@ def get_full_context(self, exclude_providers=False, exclude_env=False):
"event": self.event_context,
"last_workflow_results": self.last_workflow_execution_results,
"alert": self.event_context, # this is an alias so workflows will be able to use alert.source
+ "incident": self.incident_context, # this is an alias so workflows will be able to use alert.source
}
if not exclude_providers:
diff --git a/keep/providers/providers_service.py b/keep/providers/providers_service.py
index 9dbb0a3ba..debb97662 100644
--- a/keep/providers/providers_service.py
+++ b/keep/providers/providers_service.py
@@ -100,6 +100,16 @@ def install_provider(
session.add(provider_model)
session.commit()
except IntegrityError:
+ try:
+ # if the provider is already installed, delete the secret
+ logger.warning("Provider already installed, deleting secret")
+ secret_manager.delete_secret(
+ secret_name=secret_name,
+ )
+ logger.warning("Secret deleted")
+ except Exception:
+ logger.exception("Failed to delete the secret")
+ pass
raise HTTPException(
status_code=409, detail="Provider already installed"
)
diff --git a/keep/step/step.py b/keep/step/step.py
index be7fae881..4b0caede4 100644
--- a/keep/step/step.py
+++ b/keep/step/step.py
@@ -91,8 +91,10 @@ def _get_foreach_items(self) -> list | list[list]:
index = [i.strip() for i in index]
items = self.context_manager.get_full_context()
for i in index:
- # try to get it as a dict
- items = items.get(i, {})
+ if isinstance(items, dict):
+ items = items.get(i, {})
+ else:
+ items = getattr(items, i, {})
foreach_items.append(items)
if not foreach_items:
return []
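
This fix matters because the `incident` context entry is an `IncidentDto`, not a dict, so attribute access is required. A standalone sketch of the same lookup rule:

```python
def resolve(path: str, context):
    # Dicts use .get; anything else (e.g. a DTO) falls back to getattr,
    # defaulting to {} in both cases, as in the fixed loop above.
    items = context
    for key in (p.strip() for p in path.split(".")):
        if isinstance(items, dict):
            items = items.get(key, {})
        else:
            items = getattr(items, key, {})
    return items

class Incident:
    alerts = [{"name": "cpu high"}]

assert resolve("incident.alerts", {"incident": Incident()}) == [{"name": "cpu high"}]
```
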
diff --git a/keep/workflowmanager/workflowmanager.py b/keep/workflowmanager/workflowmanager.py
index d50ab8939..acbdb286b 100644
--- a/keep/workflowmanager/workflowmanager.py
+++ b/keep/workflowmanager/workflowmanager.py
@@ -4,13 +4,15 @@
import typing
import uuid
+from pandas.core.common import flatten
+
from keep.api.core.config import AuthenticationType, config
from keep.api.core.db import (
get_enrichment,
get_previous_alert_by_fingerprint,
save_workflow_results,
)
-from keep.api.models.alert import AlertDto, AlertSeverity
+from keep.api.models.alert import AlertDto, AlertSeverity, IncidentDto
from keep.providers.providers_factory import ProviderConfigurationException
from keep.workflowmanager.workflow import Workflow
from keep.workflowmanager.workflowscheduler import WorkflowScheduler
@@ -68,7 +70,74 @@ def _apply_filter(self, filter_val, value):
return value == str(filter_val)
return value == filter_val
- def insert_events(self, tenant_id, events: typing.List[AlertDto]):
+ def _get_workflow_from_store(self, tenant_id, workflow_model):
+ try:
+ # get the actual workflow that can be triggered
+ self.logger.info("Getting workflow from store")
+ workflow = self.workflow_store.get_workflow(
+ tenant_id, workflow_model.id
+ )
+ self.logger.info("Got workflow from store")
+ return workflow
+ except ProviderConfigurationException:
+ self.logger.exception(
+ "Workflow have a provider that is not configured",
+ extra={
+ "workflow_id": workflow_model.id,
+ "tenant_id": tenant_id,
+ },
+ )
+ except Exception:
+ self.logger.exception(
+ "Error getting workflow",
+ extra={
+ "workflow_id": workflow_model.id,
+ "tenant_id": tenant_id,
+ },
+ )
+
+ def insert_incident(self, tenant_id: str, incident: IncidentDto, trigger: str):
+ all_workflow_models = self.workflow_store.get_all_workflows(tenant_id)
+ self.logger.info(
+ "Got all workflows",
+ extra={
+ "num_of_workflows": len(all_workflow_models),
+ },
+ )
+ for workflow_model in all_workflow_models:
+
+ if workflow_model.is_disabled:
+ self.logger.debug(
+ f"Skipping the workflow: id={workflow_model.id}, name={workflow_model.name}, "
+ f"tenant_id={workflow_model.tenant_id} - Workflow is disabled."
+ )
+ continue
+ workflow = self._get_workflow_from_store(tenant_id, workflow_model)
+ if workflow is None:
+ continue
+
+ incident_triggers = flatten(
+ [t.get("events", []) for t in workflow.workflow_triggers if t["type"] == "incident"]
+ )
+
+ if trigger not in incident_triggers:
+ self.logger.debug("workflow does not contain trigger %s, skipping", trigger)
+ continue
+
+ self.logger.info("Adding workflow to run")
+ with self.scheduler.lock:
+ self.scheduler.workflows_to_run.append(
+ {
+ "workflow": workflow,
+ "workflow_id": workflow_model.id,
+ "tenant_id": tenant_id,
+ "triggered_by": "incident:{}".format(trigger),
+ "event": incident,
+ }
+ )
+ self.logger.info("Workflow added to run")
+
+ def insert_events(self, tenant_id, events: typing.List[AlertDto | IncidentDto]):
for event in events:
self.logger.info("Getting all workflows")
all_workflow_models = self.workflow_store.get_all_workflows(tenant_id)
@@ -79,37 +148,17 @@ def insert_events(self, tenant_id, events: typing.List[AlertDto]):
},
)
for workflow_model in all_workflow_models:
+
if workflow_model.is_disabled:
self.logger.debug(
f"Skipping the workflow: id={workflow_model.id}, name={workflow_model.name}, "
f"tenant_id={workflow_model.tenant_id} - Workflow is disabled."
)
continue
- try:
- # get the actual workflow that can be triggered
- self.logger.info("Getting workflow from store")
- workflow = self.workflow_store.get_workflow(
- tenant_id, workflow_model.id
- )
- self.logger.info("Got workflow from store")
- except ProviderConfigurationException:
- self.logger.exception(
- "Workflow have a provider that is not configured",
- extra={
- "workflow_id": workflow_model.id,
- "tenant_id": tenant_id,
- },
- )
- continue
- except Exception:
- self.logger.exception(
- "Error getting workflow",
- extra={
- "workflow_id": workflow_model.id,
- "tenant_id": tenant_id,
- },
- )
+ workflow = self._get_workflow_from_store(tenant_id, workflow_model)
+ if workflow is None:
continue
+
for trigger in workflow.workflow_triggers:
# TODO: handle it better
if not trigger.get("type") == "alert":
@@ -371,7 +420,8 @@ def _run_workflow(
return [errors, results]
- def _get_workflow_results(self, workflow: Workflow):
+ @staticmethod
+ def _get_workflow_results(workflow: Workflow):
"""
Get the results of the workflow from the DB.
@@ -381,8 +431,7 @@ def _get_workflow_results(self, workflow: Workflow):
Returns:
dict: The results of the workflow.
"""
- print("workflowssssss", workflow.workflow_actions)
- print(workflow.workflow_steps)
+
workflow_results = {
action.name: action.provider.results for action in workflow.workflow_actions
}
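
Conceptually, `insert_incident` flattens the `events` lists of every incident trigger and checks whether the current trigger is listed. A stdlib sketch of that matching (`itertools.chain` stands in for `pandas.core.common.flatten`, a private pandas module the diff reaches into):

```python
from itertools import chain

workflow_triggers = [
    {"type": "incident", "events": ["created", "updated"]},
    {"type": "alert"},
]

incident_triggers = set(
    chain.from_iterable(
        t.get("events", []) for t in workflow_triggers if t["type"] == "incident"
    )
)

assert "created" in incident_triggers
assert "deleted" not in incident_triggers  # such a workflow would be skipped
```
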
diff --git a/keep/workflowmanager/workflowscheduler.py b/keep/workflowmanager/workflowscheduler.py
index 693b5000b..c7df0b534 100644
--- a/keep/workflowmanager/workflowscheduler.py
+++ b/keep/workflowmanager/workflowscheduler.py
@@ -15,7 +15,7 @@
from keep.api.core.db import get_enrichment, get_previous_execution_id
from keep.api.core.db import get_workflow as get_workflow_db
from keep.api.core.db import get_workflows_that_should_run
-from keep.api.models.alert import AlertDto
+from keep.api.models.alert import AlertDto, IncidentDto
from keep.providers.providers_factory import ProviderConfigurationException
from keep.workflowmanager.workflow import Workflow, WorkflowStrategy
from keep.workflowmanager.workflowstore import WorkflowStore
@@ -57,10 +57,11 @@ def _handle_interval_workflows(self):
pass
for workflow in workflows:
self.logger.debug("Running workflow on background")
+
+ workflow_execution_id = workflow.get("workflow_execution_id")
+ tenant_id = workflow.get("tenant_id")
+ workflow_id = workflow.get("workflow_id")
try:
- workflow_execution_id = workflow.get("workflow_execution_id")
- tenant_id = workflow.get("tenant_id")
- workflow_id = workflow.get("workflow_id")
workflow = self.workflow_store.get_workflow(tenant_id, workflow_id)
except ProviderConfigurationException:
self.logger.exception(
@@ -106,8 +107,13 @@ def _run_workflow(
):
self.logger.info(f"Running workflow {workflow.workflow_id}...")
try:
- # set the event context, e.g. the event that triggered the workflow
- workflow.context_manager.set_event_context(event_context)
+ if isinstance(event_context, AlertDto):
+ # set the event context, e.g. the event that triggered the workflow
+ workflow.context_manager.set_event_context(event_context)
+ else:
+ # set the incident context, e.g. the incident that triggered the workflow
+ workflow.context_manager.set_incident_context(event_context)
+
errors, _ = self.workflow_manager._run_workflow(
workflow, workflow_execution_id
)
@@ -216,6 +222,7 @@ def handle_manual_event_workflow(
execution_number=unique_execution_number,
fingerprint=alert.fingerprint,
event_id=alert.event_id,
+ event_type="alert"
)
self.logger.info(f"Workflow execution id: {workflow_execution_id}")
# This is kinda WTF exception since create_workflow_execution shouldn't fail for manual
@@ -313,13 +320,26 @@ def _handle_event_workflows(self):
continue
event = workflow_to_run.get("event")
+
triggered_by = workflow_to_run.get("triggered_by")
if triggered_by == "manual":
triggered_by_user = workflow_to_run.get("triggered_by_user")
triggered_by = f"manually by {triggered_by_user}"
+ elif triggered_by.startswith("incident:"):
+ triggered_by = f"type:{triggered_by} name:{event.name} id:{event.id}"
else:
triggered_by = f"type:alert name:{event.name} id:{event.id}"
+ if isinstance(event, IncidentDto):
+ event_id = str(event.id)
+ event_type = "incident"
+ fingerprint = "incident:{}".format(event_id)
+ else:
+ event_id = event.event_id
+ event_type = "alert"
+ fingerprint = event.fingerprint
+
# In manual, we create the workflow execution id sync so it could be tracked by the caller (UI)
# In event (e.g. alarm), we will create it here
if not workflow_execution_id:
@@ -333,16 +353,17 @@ def _handle_event_workflows(self):
# else, we want to enforce that no workflow already run with the same fingerprint
else:
workflow_execution_number = self._get_unique_execution_number(
- event.fingerprint
+ fingerprint
)
workflow_execution_id = create_workflow_execution(
workflow_id=workflow_id,
tenant_id=tenant_id,
triggered_by=triggered_by,
execution_number=workflow_execution_number,
- fingerprint=event.fingerprint,
- event_id=event.event_id,
+ fingerprint=fingerprint,
+ event_id=event_id,
execution_id=execution_id,
+ event_type=event_type,
)
# If there is already running workflow from the same event
except IntegrityError:
@@ -404,7 +425,7 @@ def _handle_event_workflows(self):
# - the second one will wait for the next iteration
# - on the next iteration, the second alert is enriched with the ticket_url
# and will trigger a workflow that will update the ticket with "resolved"
- if workflow_to_run.get("retry", False):
+ if workflow_to_run.get("retry", False) and isinstance(event, AlertDto):
try:
self.logger.info(
"Updating enrichments for workflow after retry",
diff --git a/scripts/docs_get_providers_list.py b/scripts/docs_get_providers_list.py
index 6d01b3392..a12687f38 100644
--- a/scripts/docs_get_providers_list.py
+++ b/scripts/docs_get_providers_list.py
@@ -6,10 +6,10 @@
python get_providers_list.py --validate # To check docs/providers/overview.mdx
"""
+import argparse
import glob
import os
import re
-import argparse
LOGO_DEV_PUBLISHABLE_KEY = "pk_dfXfZBoKQMGDTIgqu7LvYg"
@@ -24,10 +24,13 @@ def validate(providers_to_validate):
for provider in providers_to_validate:
if provider not in overview_content:
- print(f"""Provider {provider} is not in the docs/providers/overview.md file,
-use scripts/get_providers_list.py to generate recent providers list and update the file.""")
+ print(
+ f"""Provider {provider} is not in the docs/providers/overview.md file,
+use scripts/get_providers_list.py to generate recent providers list and update the file."""
+ )
exit(1)
+
def main():
"""
This script lists all the integrations in the documentation folder and outputs a markdown list of links.
@@ -42,13 +45,12 @@ def main():
if os.path.isfile(file_path):
with open(file_path, "r") as file:
for line in file.readlines():
- match = re.search(r'title:\s*"([^"]+)"', line)
+ match = re.search(r"title:\s*[\"\']([^\"\']+)[\"\']", line)
if match:
url = "/providers/documentation/" + file_path.replace(
"./../docs/providers/documentation/", ""
).replace(".mdx", "")
- provider_name = match.group(
- 1).replace("Provider", "").strip()
+ provider_name = match.group(1).replace("Provider", "").strip()
# Due to https://github.com/keephq/keep/pull/1239#discussion_r1643196800
if "Slack" in provider_name:
diff --git a/tests/conftest.py b/tests/conftest.py
index 245fde4dc..0a5088d84 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,6 +1,7 @@
import inspect
import os
import random
+import time
import uuid
from datetime import datetime, timedelta, timezone
from unittest.mock import Mock, patch
@@ -473,6 +474,7 @@ def _create_valid_event(d, lastReceived=None):
event = {
"id": str(uuid.uuid4()),
"name": "some-test-event",
+ "status": "firing",
"lastReceived": (
str(lastReceived)
if lastReceived
@@ -488,6 +490,8 @@ def setup_alerts(elastic_client, db_session, request):
alert_details = request.param.get("alert_details")
alerts = []
for i, detail in enumerate(alert_details):
+ # sleep to avoid same lastReceived
+ time.sleep(0.02)
detail["fingerprint"] = f"test-{i}"
alerts.append(
Alert(
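The 0.02 s sleep above forces each generated alert to get a distinct lastReceived. A sleep-free alternative (not what this change does) is to assign strictly increasing timestamps explicitly:

from datetime import datetime, timedelta

base = datetime.utcnow()
# 20 ms apart, no real waiting involved
last_received_values = [
    (base + timedelta(milliseconds=20 * i)).isoformat() for i in range(5)
]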
diff --git a/tests/test_alert_correlation.py b/tests/test_alert_correlation.py
new file mode 100644
index 000000000..aaa06be16
--- /dev/null
+++ b/tests/test_alert_correlation.py
@@ -0,0 +1,133 @@
+import os
+import pytest
+import random
+import numpy as np
+
+from datetime import datetime, timedelta
+from unittest.mock import patch, MagicMock, AsyncMock
+from keep.api.models.db.alert import Alert
+from keep.api.models.db.tenant import Tenant
+from ee.experimental.incident_utils import mine_incidents_and_create_objects, calculate_pmi_matrix, DEFAULT_TEMP_DIR_LOCATION
+
+random.seed(42)
+
+@pytest.mark.asyncio
+async def test_mine_incidents_and_create_objects(db_session, tenant_id='test', n_alerts=10000, n_fingerprints=50):
+ # Add alerts
+ current_time = datetime.now()
+ time_lags = [int(round(random.normalvariate(mu=60*24*30/2, sigma=60*24*30/6))) for _ in range(n_alerts)]
+ alerts = [
+ Alert(
+ tenant_id=tenant_id,
+ provider_type="test",
+ provider_id="test",
+ event={
+ "id": f"test-{i}",
+ "name": f"Test Alert {i}",
+ "fingerprint": f"fp-{i % n_fingerprints}",
+ "lastReceived": (current_time - timedelta(minutes=time_lags[i])).isoformat(),
+ "severity": "critical",
+ "source": ["test-source"],
+ },
+ fingerprint=f"fp-{i % n_fingerprints}",
+ timestamp=current_time - timedelta(minutes=time_lags[i])
+ )
+ for i in range(n_alerts)
+ ]
+ db_session.add_all(alerts)
+ db_session.commit()
+
+ # add Tenant
+ tenant = Tenant(
+ id=tenant_id,
+ name=tenant_id,
+ configuration={
+ "ee_enabled": True,
+ }
+ )
+ db_session.add(tenant)
+ db_session.commit()
+
+ # Mock dependencies and call the function
+ with patch('ee.experimental.incident_utils.get_pusher_client') as mock_pusher, \
+ patch('ee.experimental.incident_utils.get_pool') as mock_get_pool:
+
+ mock_pusher.return_value = MagicMock()
+ mock_pool = AsyncMock()
+ mock_get_pool.return_value = mock_pool
+
+ result = await mine_incidents_and_create_objects(None, tenant_id)
+
+ assert result is not None
+ assert mock_pusher.called
+ assert mock_get_pool.called
+
+def test_calculate_pmi_matrix(db_session, tenant_id='test', n_alerts=10000, n_fingerprints=50):
+ # Add Alerts
+ current_time = datetime.now()
+ time_lags = [int(round(random.normalvariate(mu=60*24*30/2, sigma=60*24*30/6))) for _ in range(n_alerts)]
+ alerts = [
+ Alert(
+ tenant_id=tenant_id,
+ provider_type="test",
+ provider_id="test",
+ event={
+ "id": f"test-{i}",
+ "name": f"Test Alert {i}",
+ "fingerprint": f"fp-{i % n_fingerprints}",
+ "lastReceived": (current_time - timedelta(minutes=time_lags[i])).isoformat(),
+ "severity": "critical",
+ "source": ["test-source"],
+ },
+ fingerprint=f"fp-{i % n_fingerprints}",
+ timestamp=current_time - timedelta(minutes=time_lags[i])
+ )
+ for i in range(n_alerts)
+ ]
+ db_session.add_all(alerts)
+ db_session.commit()
+
+ # add Tenant
+ tenant = Tenant(
+ id=tenant_id,
+ name=tenant_id,
+ configuration={
+ "ee_enabled": True,
+ }
+ )
+ db_session.add(tenant)
+ db_session.commit()
+
+ # Call the function
+ result = calculate_pmi_matrix(None, tenant_id)
+
+ assert result["status"] == "success"
+ pmi_matrix = result["pmi_matrix"]
+ fingerprints = result["pmi_columns"]
+ assert (np.unique(fingerprints) == np.unique([f"fp-{i % n_fingerprints}" for i in range(n_fingerprints)])).all()
+ assert pmi_matrix.shape == (n_fingerprints, n_fingerprints)
+
+
+@pytest.mark.asyncio
+async def test_mine_incidents_and_create_objects_with_no_alerts(db_session, tenant_id='test'):
+ # add Tenant
+ tenant = Tenant(
+ id=tenant_id,
+ name=tenant_id,
+ configuration={
+ "ee_enabled": True,
+ }
+ )
+ db_session.add(tenant)
+ db_session.commit()
+
+ with patch('ee.experimental.incident_utils.get_pusher_client') as mock_pusher, \
+ patch('ee.experimental.incident_utils.get_pool') as mock_get_pool:
+
+ mock_pusher.return_value = MagicMock()
+ mock_pool = AsyncMock()
+ mock_get_pool.return_value = mock_pool
+
+ result = await mine_incidents_and_create_objects(None, tenant_id)
+
+ assert result == {"incidents": []}
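The tests above only check the shape and column labels of the matrix returned by calculate_pmi_matrix. As a reference for what a PMI score is, here is a standalone sketch of the textbook formula over a co-occurrence count matrix (this is not the ee.experimental implementation):

import numpy as np

def pmi(cooccurrence: np.ndarray) -> np.ndarray:
    # cooccurrence[i, j]: how often fingerprints i and j fired together
    total = cooccurrence.sum()
    p_xy = cooccurrence / total                  # joint probabilities
    p_x = p_xy.sum(axis=1, keepdims=True)        # marginals, shape (n, 1)
    with np.errstate(divide="ignore", invalid="ignore"):
        scores = np.log(p_xy / (p_x @ p_x.T))    # log p(x,y) / (p(x) p(y))
    # pairs that never co-occur come out as -inf; clamp for convenience
    return np.nan_to_num(scores, nan=0.0, neginf=0.0)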
diff --git a/tests/test_incidents.py b/tests/test_incidents.py
index 36fc2cb71..6156525c6 100644
--- a/tests/test_incidents.py
+++ b/tests/test_incidents.py
@@ -15,9 +15,10 @@
)
from keep.api.core.db_utils import get_json_extract_field
from keep.api.core.dependencies import SINGLE_TENANT_UUID
-from keep.api.models.alert import IncidentSeverity, AlertSeverity, AlertStatus
+from keep.api.models.alert import IncidentSeverity, AlertSeverity, AlertStatus, IncidentStatus
from keep.api.models.db.alert import Alert
-
+from keep.api.utils.enrichment_helpers import convert_db_alerts_to_dto_alerts
+from tests.fixtures.client import client, test_app
+
def test_get_alerts_data_for_incident(db_session, setup_stress_alerts_no_elastic):
alerts = setup_stress_alerts_no_elastic(100)
@@ -175,4 +176,62 @@ def test_get_last_incidents(db_session, create_alert):
assert all([i.severity == IncidentSeverity.LOW.order for i in incidents_sorted_by_severity])
incidents_sorted_by_severity_desc, _ = get_last_incidents(SINGLE_TENANT_UUID, is_confirmed=True, sorting=IncidentSorting.severity_desc, limit=5)
- assert all([i.severity == IncidentSeverity.CRITICAL.order for i in incidents_sorted_by_severity_desc])
\ No newline at end of file
+ assert all([i.severity == IncidentSeverity.CRITICAL.order for i in incidents_sorted_by_severity_desc])
+
+@pytest.mark.parametrize(
+ "test_app", ["NO_AUTH"], indirect=True
+)
+def test_incident_status_change(db_session, client, test_app, setup_stress_alerts_no_elastic):
+
+ alerts = setup_stress_alerts_no_elastic(100)
+ incident = create_incident_from_dict("keep", {"name": "test", "description": "test"})
+
+ add_alerts_to_incident_by_incident_id(
+ "keep",
+ incident.id,
+ [a.id for a in alerts]
+ )
+
+ incident = get_incident_by_id("keep", incident.id, with_alerts=True)
+
+ alerts_dtos = convert_db_alerts_to_dto_alerts(incident.alerts)
+ assert len([alert for alert in alerts_dtos if alert.status == AlertStatus.RESOLVED.value]) == 0
+
+ response_ack = client.post(
+ "/incidents/{}/status".format(incident.id),
+ headers={"x-api-key": "some-key"},
+ json={
+ "status": IncidentStatus.ACKNOWLEDGED.value,
+ }
+ )
+
+ assert response_ack.status_code == 200
+ data = response_ack.json()
+ assert data["id"] == str(incident.id)
+ assert data["status"] == IncidentStatus.ACKNOWLEDGED.value
+
+ incident = get_incident_by_id("keep", incident.id, with_alerts=True)
+
+ assert incident.status == IncidentStatus.ACKNOWLEDGED.value
+ alerts_dtos = convert_db_alerts_to_dto_alerts(incident.alerts)
+ assert len([alert for alert in alerts_dtos if alert.status == AlertStatus.RESOLVED.value]) == 0
+
+ response_resolved = client.post(
+ "/incidents/{}/status".format(incident.id),
+ headers={"x-api-key": "some-key"},
+ json={
+ "status": IncidentStatus.RESOLVED.value,
+ }
+ )
+
+ assert response_resolved.status_code == 200
+ data = response_resolved.json()
+ assert data["id"] == str(incident.id)
+ assert data["status"] == IncidentStatus.RESOLVED.value
+
+ incident = get_incident_by_id("keep", incident.id, with_alerts=True)
+
+ assert incident.status == IncidentStatus.RESOLVED.value
+ # All alerts are resolved as well
+ alerts_dtos = convert_db_alerts_to_dto_alerts(incident.alerts)
+ assert len([alert for alert in alerts_dtos if alert.status == AlertStatus.RESOLVED.value]) == 100
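The status-change flow exercised by this test is a plain HTTP call against the incidents API. An illustrative client-side call with requests (base URL and API key are placeholders):

import requests

response = requests.post(
    "http://localhost:8080/incidents/<incident-id>/status",
    headers={"x-api-key": "<api-key>"},
    json={"status": "resolved"},
)
response.raise_for_status()
print(response.json()["status"])  # expected: "resolved"

As the final assertion shows, resolving an incident cascades to its attached alerts, which all read as resolved afterwards.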
diff --git a/tests/test_workflow_execution.py b/tests/test_workflow_execution.py
index b772391fd..af04791d4 100644
--- a/tests/test_workflow_execution.py
+++ b/tests/test_workflow_execution.py
@@ -4,10 +4,11 @@
import pytest
import pytz
+import time
from keep.api.core.db import get_last_workflow_execution_by_workflow_id
from keep.api.core.dependencies import SINGLE_TENANT_UUID
-from keep.api.models.alert import AlertDto, AlertStatus
+from keep.api.models.alert import AlertDto, AlertStatus, IncidentDtoIn, IncidentDto
from keep.api.models.db.workflow import Workflow
from keep.workflowmanager.workflowmanager import WorkflowManager
@@ -575,3 +576,135 @@ def test_workflow_execution_with_disabled_workflow(
assert enabled_workflow_execution.status == "success"
assert disabled_workflow_execution is None
+
+
+workflow_definition_4 = """workflow:
+id: incident-triggers-test-created-updated
+description: test incident triggers
+triggers:
+- type: incident
+ events:
+ - updated
+ - created
+name: created-updated
+owners: []
+services: []
+steps: []
+actions:
+- name: mock-action
+ provider:
+ type: console
+ with:
+ message: |
+ "incident: {{ incident.name }}"
+"""
+
+workflow_definition_5 = """workflow:
+id: incident-incident-triggers-test-deleted
+description: test incident triggers
+triggers:
+- type: incident
+ events:
+ - deleted
+name: deleted
+owners: []
+services: []
+steps: []
+actions:
+- name: mock-action
+ provider:
+ type: console
+ with:
+ message: |
+ "deleted incident: {{ incident.name }}"
+"""
+
+
+def test_workflow_incident_triggers(
+ db_session,
+ workflow_manager,
+):
+ workflow_created = Workflow(
+ id="incident-triggers-test-created-updated",
+ name="incident-triggers-test-created-updated",
+ tenant_id=SINGLE_TENANT_UUID,
+ description="Check that incident triggers works",
+ created_by="test@keephq.dev",
+ interval=0,
+ workflow_raw=workflow_definition_4,
+ )
+ db_session.add(workflow_created)
+ db_session.commit()
+
+ # Create the incident
+ incident = IncidentDto(
+ id="ba9ddbb9-3a83-40fc-9ace-1e026e08ca2b",
+ user_generated_name="incident",
+ alerts_count=0,
+ alert_sources=[],
+ services=[],
+ severity="critical",
+ is_predicted=False,
+ is_confirmed=True,
+ )
+
+ # Insert the incident into the workflow manager
+
+ def wait_workflow_execution(workflow_id):
+ # Wait for the workflow execution to complete
+ workflow_execution = None
+ count = 0
+ status = None
+ while (workflow_execution is None or status != "success") and count < 30:
+ workflow_execution = get_last_workflow_execution_by_workflow_id(
+ SINGLE_TENANT_UUID, workflow_id
+ )
+ if workflow_execution is not None:
+ status = workflow_execution.status
+ time.sleep(1)
+ count += 1
+ return workflow_execution
+
+ workflow_manager.insert_incident(SINGLE_TENANT_UUID, incident, "created")
+ assert len(workflow_manager.scheduler.workflows_to_run) == 1
+
+ workflow_execution_created = wait_workflow_execution("incident-triggers-test-created-updated")
+ assert workflow_execution_created is not None
+ assert workflow_execution_created.status == "success"
+ assert workflow_execution_created.results['mock-action'] == ['"incident: incident"\n']
+ assert len(workflow_manager.scheduler.workflows_to_run) == 0
+
+ workflow_manager.insert_incident(SINGLE_TENANT_UUID, incident, "updated")
+ assert len(workflow_manager.scheduler.workflows_to_run) == 1
+ workflow_execution_updated = wait_workflow_execution("incident-triggers-test-created-updated")
+ assert workflow_execution_updated is not None
+ assert workflow_execution_updated.status == "success"
+ assert workflow_execution_updated.results['mock-action'] == ['"incident: incident"\n']
+
+ # incident-triggers-test-created-updated should not be triggered
+ workflow_manager.insert_incident(SINGLE_TENANT_UUID, incident, "deleted")
+ assert len(workflow_manager.scheduler.workflows_to_run) == 0
+
+ workflow_deleted = Workflow(
+ id="incident-triggers-test-deleted",
+ name="incident-triggers-test-deleted",
+ tenant_id=SINGLE_TENANT_UUID,
+ description="Check that incident triggers works",
+ created_by="test@keephq.dev",
+ interval=0,
+ workflow_raw=workflow_definition_5,
+ )
+ db_session.add(workflow_deleted)
+ db_session.commit()
+
+ workflow_manager.insert_incident(SINGLE_TENANT_UUID, incident, "deleted")
+ assert len(workflow_manager.scheduler.workflows_to_run) == 1
+
+ # incident-triggers-test-deleted should be triggered now
+ workflow_execution_deleted = wait_workflow_execution("incident-triggers-test-deleted")
+ assert len(workflow_manager.scheduler.workflows_to_run) == 0
+
+ assert workflow_execution_deleted is not None
+ assert workflow_execution_deleted.status == "success"
+ assert workflow_execution_deleted.results['mock-action'] == ['"deleted incident: incident"\n']
\ No newline at end of file