diff --git a/localization/v2.5.x/site/en/adminGuide/resource_group.json b/localization/v2.5.x/site/en/adminGuide/resource_group.json index 614ae1917..f27472ee4 100644 --- a/localization/v2.5.x/site/en/adminGuide/resource_group.json +++ b/localization/v2.5.x/site/en/adminGuide/resource_group.json @@ -1 +1 @@ -{"codeList":["{\n \"requests\": { \"nodeNum\": 1 },\n \"limits\": { \"nodeNum\": 1 },\n \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n requests={\"node_num\": node_num},\n limits={\"node_num\": node_num},\n ), using='default')\n print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n# , // string, rg name\n# , // int, num_node which has been transfer to this rg\n# , // int, available node_num, some node may shutdown\n# , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n# , // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n# . // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n utility.update_resource_groups({\n source: ResourceGroupConfig(\n requests={\"node_num\": expected_num_nodes_in_default},\n limits={\"node_num\": expected_num_nodes_in_default},\n ),\n target: ResourceGroupConfig(\n requests={\"node_num\": expected_num_nodes_in_rg},\n limits={\"node_num\": expected_num_nodes_in_rg},\n )\n }, using=\"default\")\n print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n 
print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n utility.update_resource_groups({\n \"rg\": utility.ResourceGroupConfig(\n requests={\"node_num\": 0},\n limits={\"node_num\": 0},\n ),\n }, using=\"default\")\n utility.drop_resource_group(\"rg\", using=\"default\")\n print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n ))\n\n # update default resource group, which can used to hold the nodes that all initial node in it.\n utility.update_resource_groups({\n \"__default_resource_group\": ResourceGroupConfig(\n requests={\"node_num\": node_num},\n limits={\"node_num\": node_num},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n )})\n utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n requests={\"node_num\": 0},\n limits={\"node_num\": 0},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n ))\n utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n requests={\"node_num\": 0},\n limits={\"node_num\": 0},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n # scale the querynode number in Milvus into node_num.\n pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n \"rg1\": ResourceGroupConfig(\n requests={\"node_num\": 3},\n limits={\"node_num\": 3},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n ),\n \"rg2\": ResourceGroupConfig(\n requests={\"node_num\": 1},\n limits={\"node_num\": 1},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n \"rg1\": ResourceGroupConfig(\n requests={\"node_num\": 2},\n limits={\"node_num\": 2},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 
node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"Manage Resource Groups","anchorList":[{"label":"Manage Resource Groups","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"What is a resource group","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"Concepts of resource group","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"Use declarative api to manage resource group","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"A good practice to manage cluster scaling","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"How resource groups interacts with multiple replicas","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"What's next","href":"Whats-next","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    milvus_client.create_resource_group(name, config=ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ))\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = milvus_client.list_resource_groups()\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = milvus_client.describe_resource_group(name)\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n# ResourceGroupInfo:\n#   <name:rg>,      // resource group name\n#   <capacity:0>,   // resource group capacity\n#   <num_available_node:1>,  // number of available nodes in the resource group\n#   <num_loaded_replica:{}>, // number of loaded replicas of each collection in this resource group\n#   <num_outgoing_node:{}>,  // number of nodes in this resource group still in use by replicas in other resource groups\n#   <num_incoming_node:{}>,  // number of nodes in use by replicas in this resource group but belonging to other resource groups\n#   <config:{}>,            // resource group config\n#   <nodes:[]>              // node detail info\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    milvus_client.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    })\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection_name = \"demo\"\n\n# Milvus loads the collection to the default resource group.\nmilvus_client.load_collection(collection_name, replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\nmilvus_client.load_collection(collection_name, replica_number=2, _resource_groups=resource_groups)\n","collection = \"Books\"\npartition = 
\"Novels\"\n\n# Use the load method of a collection to load one of its partition\nmilvus_client.load_partitions(collection, [partition], replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n milvus_client.transfer_replica(source, target, collection_name, num_replicas)\n print(f\"Succeeded in moving {num_replicas} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","resource_group = \"rg\ntry:\n milvus_client.update_resource_groups({\n resource_group: ResourceGroupConfig(\n requests={\"node_num\": 0},\n limits={\"node_num\": 0},\n ),\n })\n milvus_client.drop_resource_group(resource_group)\n print(f\"Succeeded in dropping {resource_group}.\")\nexcept Exception:\n print(f\"Something went wrong while dropping {resource_group}.\")\n","from pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n milvus_client.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n ))\n\n # update default resource group, which can used to hold the nodes that all initial node in it.\n milvus_client.update_resource_groups({\n \"__default_resource_group\": ResourceGroupConfig(\n requests={\"node_num\": node_num},\n limits={\"node_num\": node_num},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n )})\n milvus_client.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n requests={\"node_num\": 0},\n limits={\"node_num\": 0},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n ))\n milvus_client.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n requests={\"node_num\": 0},\n limits={\"node_num\": 0},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n # scale the querynode number in Milvus into node_num.\n pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nmilvus_client.update_resource_groups({\n \"rg1\": ResourceGroupConfig(\n requests={\"node_num\": 3},\n limits={\"node_num\": 3},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n ),\n \"rg2\": ResourceGroupConfig(\n requests={\"node_num\": 1},\n limits={\"node_num\": 1},\n transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 
nodes into 2 nodes\nmilvus_client.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"Manage Resource Groups","anchorList":[{"label":"Manage Resource Groups","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"What is a resource group","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"Concepts of resource group","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"Use declarative api to manage resource group","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"A good practice to manage cluster scaling","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"How resource groups interacts with multiple replicas","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"What's next","href":"Whats-next","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.5.x/site/en/adminGuide/resource_group.md b/localization/v2.5.x/site/en/adminGuide/resource_group.md
index 220642687..d55a1b626 100644
--- a/localization/v2.5.x/site/en/adminGuide/resource_group.md
+++ b/localization/v2.5.x/site/en/adminGuide/resource_group.md
@@ -103,10 +103,10 @@ node_num = 0
 
 # create a resource group that exactly hold no query node.
 try:
-    utility.create_resource_group(name, config=utility.ResourceGroupConfig(
+    milvus_client.create_resource_group(name, config=ResourceGroupConfig(
         requests={"node_num": node_num},
         limits={"node_num": node_num},
-    ), using='default')
+    ))
     print(f"Succeeded in creating resource group {name}.")
 except Exception:
     print("Failed to create the resource group.")
@@ -114,23 +114,26 @@ node_num = 0
  • List resource groups.

    Once you create a resource group, you can see it in the resource group list.

    To view the list of resource groups in a Milvus instance, do as follows:

    -rgs = utility.list_resource_groups(using='default')
    +rgs = milvus_client.list_resource_groups()
     print(f"Resource group list: {rgs}")
     
     # Resource group list: ['__default_resource_group', 'rg']
     
  • Describe a resource group.

    You can have Milvus describe the resource group in question as follows:

    -info = utility.describe_resource_group(name, using="default")
    +info = milvus_client.describe_resource_group(name)
     print(f"Resource group description: {info}")
     
     # Resource group description: 
    -#        <name:"rg">,           // string, rg name
    -#        <capacity:1>,            // int, num_node which has been transfer to this rg
    -#        <num_available_node:0>,  // int, available node_num, some node may shutdown
    -#        <num_loaded_replica:{}>, // map[string]int, from collection_name to loaded replica of each collecion in this rg
    -#        <num_outgoing_node:{}>,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg 
    -#        <num_incoming_node:{}>.  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg
    +# ResourceGroupInfo:
    +#   <name:rg1>,     // resource group name
    +#   <capacity:0>,   // resource group capacity
    +#   <num_available_node:1>,  // resource group node num
    +#   <num_loaded_replica:{}>, // collection loaded replica num in resource group
    +#   <num_outgoing_node:{}>, // node num which still in use by replica in other resource group
    +#   <num_incoming_node:{}>, // node num which is in use by replica but belong to other resource group 
    +#   <config:{}>,            // resource group config
    +#   <nodes:[]>              // node detail info
     
  • Transfer nodes between resource groups.

    You may notice that the described resource group does not have any query node yet. Move some nodes from the default resource group to the one you created as follows:

    @@ -142,7 +145,7 @@ expected_num_nodes_in_default = 0
     expected_num_nodes_in_rg = 1
     
     try:
    -    utility.update_resource_groups({
    +    milvus_client.update_resource_groups({
             source: ResourceGroupConfig(
                 requests={"node_num": expected_num_nodes_in_default},
                 limits={"node_num": expected_num_nodes_in_default},
    @@ -151,7 +154,7 @@ expected_num_nodes_in_rg = 1
                 requests={"node_num": expected_num_nodes_in_rg},
                 limits={"node_num": expected_num_nodes_in_rg},
             )
    -    }, using="default")
    +    })
         print(f"Succeeded in move 1 node(s) from {source} to {target}.")
     except Exception:
         print("Something went wrong while moving nodes.")
    @@ -162,25 +165,22 @@ expected_num_nodes_in_rg = 1

    Once there are query nodes in a resource group, you can load collections to this resource group. The following snippet assumes that a collection named demo already exists.

    from pymilvus import Collection
     
    -collection = Collection('demo')
    +collection_name = "demo"
     
     # Milvus loads the collection to the default resource group.
    -collection.load(replica_number=2)
    +milvus_client.load_collection(collection_name, replica_number=2)
     
     # Or, you can ask Milvus load the collection to the desired resource group.
     # make sure that query nodes num should be greater or equal to replica_number
     resource_groups = ['rg']
    -collection.load(replica_number=2, _resource_groups=resource_groups) 
    +milvus_client.load_collection(collection_name, replica_number=2, _resource_groups=resource_groups)
     

    Also, you can just load a partition into a resource group and have its replicas distributed among several resource groups. The following assumes that a collection named Books already exists and it has a partition named Novels.

    -collection = Collection("Books")
    +collection = "Books"
    +partition = "Novels"
     
     # Use the load method of a collection to load one of its partition
    -collection.load(["Novels"], replica_number=2, _resource_groups=resource_groups)
    -
    -# Or, you can use the load method of a partition directly
    -partition = Partition(collection, "Novels")
    -partition.load(replica_number=2, _resource_groups=resource_groups)
    +milvus_client.load_partitions(collection, [partition], replica_number=2, _resource_groups=resource_groups)
     

    Note that _resource_groups is an optional parameter; leaving it unspecified has Milvus load the replicas onto the query nodes in the default resource group.

    To have Milvus load each replica of a collection in a separate resource group, ensure that the number of resource groups equals the number of replicas, as in the sketch below.
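
    For example, here is a minimal sketch, assuming two resource groups named rg1 and rg2 already exist and each holds at least one query node (both names are illustrative):

    # One replica lands in each resource group because the number of
    # resource groups equals replica_number.
    milvus_client.load_collection(
        collection_name,
        replica_number=2,
        _resource_groups=['rg1', 'rg2'],
    )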

  • Transfer replicas between resource groups.

    @@ -192,8 +192,8 @@ collection_name = 'c'
     num_replicas = 1
     
     try:
    -    utility.transfer_replica(source, target, collection_name, num_replicas, using="default")
    -    print(f"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.")
    +    milvus_client.transfer_replica(source, target, collection_name, num_replicas)
    +    print(f"Succeeded in moving {num_replicas} replica(s) of {collection_name} from {source} to {target}.")
     except Exception:
         print("Something went wrong while moving replicas.")
    @@ -201,18 +201,19 @@ num_replicas = 1
  • Drop a resource group.

    You can drop a resource group that holds no query node (limits.node_num = 0) at any time. In this guide, resource group rg now has one query node. You need to set the limits.node_num configuration of the resource group to zero first.

    -try:
    -    utility.update_resource_groups({
    -        "rg": utility.ResourceGroupConfig(
    -            requests={"node_num": 0},
    -            limits={"node_num": 0},
    +resource_group = "rg"
    +try:
    +    milvus_client.update_resource_groups({
    +        resource_group: ResourceGroupConfig(
    +            requests={"node_num": 0},
    +            limits={"node_num": 0},
             ),
    -    }, using="default")
    -    utility.drop_resource_group("rg", using="default")
    -    print(f"Succeeded in dropping {source}.")
    -except Exception:
    -    print(f"Something went wrong while dropping {source}.")
    +    })
    +    milvus_client.drop_resource_group(resource_group)
    +    print(f"Succeeded in dropping {resource_group}.")
    +except Exception:
    +    print(f"Something went wrong while dropping {resource_group}.")

    For more details, please refer to the relevant examples in pymilvus

    A good practice to manage cluster scaling

    We can use the API to scale a specific resource group to a designated number of QueryNodes without affecting any other resource groups.

    # scale rg1 into 3 nodes, rg2 into 1 nodes
    -utility.update_resource_groups({
    +milvus_client.update_resource_groups({
         "rg1": ResourceGroupConfig(
             requests={"node_num": 3},
             limits={"node_num": 3},
    @@ -302,7 +302,7 @@ scale_to(5)
     
  • Cluster scale in

    Similarly, we can establish scaling-in rules that prioritize selecting QueryNodes from the __pending_nodes resource group; this information can be obtained through the describe_resource_group API (see the sketch after the snippet below). This achieves the goal of scaling in a specified resource group.

    # scale rg1 from 3 nodes into 2 nodes
    -utility.update_resource_groups({
    +milvus_client.update_resource_groups({
         "rg1": ResourceGroupConfig(
             requests={"node_num": 2},
             limits={"node_num": 2},
    diff --git a/localization/v2.5.x/site/en/userGuide/insert-and-delete/delete-entities.md b/localization/v2.5.x/site/en/userGuide/insert-and-delete/delete-entities.md
    index 06c42e72c..eee25c6d6 100644
    --- a/localization/v2.5.x/site/en/userGuide/insert-and-delete/delete-entities.md
    +++ b/localization/v2.5.x/site/en/userGuide/insert-and-delete/delete-entities.md
    @@ -33,7 +33,7 @@ title: Delete Entities
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -When deleting multiple entities that share some attributes in a batch, you can use filter expressions. The example code below uses the in operator to bulk delete all Entities with thier color field set to the values of red and green. You can also use other operators to construct filter expressions that meet your requirements. For more information about filter expressions, please refer to Metadata Filtering.
    +When deleting multiple entities that share some attributes in a batch, you can use filter expressions. The example code below uses the in operator to bulk delete all Entities with their color field set to the values of red and green. You can also use other operators to construct filter expressions that meet your requirements. For more information about filter expressions, please refer to Metadata Filtering.
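
    As a minimal sketch, assuming a MilvusClient connected as client and a collection named quick_setup with a color field (both names are illustrative), such a bulk delete might look like this:

    from pymilvus import MilvusClient

    client = MilvusClient(uri="http://localhost:19530")

    # Delete every entity whose color is either "red" or "green".
    res = client.delete(
        collection_name="quick_setup",
        filter='color in ["red", "green"]',
    )
    print(res)  # e.g. the number of deleted entities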