# Source file: public/Spark/New-FabricSparkCustomPool.ps1
<#
.SYNOPSIS
    Creates a new Spark custom pool in a specified Microsoft Fabric workspace.

.DESCRIPTION
    This function sends a POST request to the Microsoft Fabric API to create a new Spark
    custom pool in the specified workspace. It supports various parameters for Spark
    custom pool configuration.

.PARAMETER WorkspaceId
    The unique identifier of the workspace where the Spark custom pool will be created.
    This parameter is mandatory.

.PARAMETER SparkCustomPoolName
    The name of the Spark custom pool to be created. This parameter is mandatory.

.PARAMETER NodeFamily
    The family of nodes to be used in the Spark custom pool. This parameter is mandatory
    and must be 'MemoryOptimized'.

.PARAMETER NodeSize
    The size of the nodes to be used in the Spark custom pool. This parameter is mandatory
    and must be one of 'Large', 'Medium', 'Small', 'XLarge', 'XXLarge'.

.PARAMETER AutoScaleEnabled
    Specifies whether auto-scaling is enabled for the Spark custom pool. This parameter
    is mandatory.

.PARAMETER AutoScaleMinNodeCount
    The minimum number of nodes for auto-scaling in the Spark custom pool. This parameter
    is mandatory.

.PARAMETER AutoScaleMaxNodeCount
    The maximum number of nodes for auto-scaling in the Spark custom pool. This parameter
    is mandatory.

.PARAMETER DynamicExecutorAllocationEnabled
    Specifies whether dynamic executor allocation is enabled for the Spark custom pool.
    This parameter is mandatory.

.PARAMETER DynamicExecutorAllocationMinExecutors
    The minimum number of executors for dynamic executor allocation in the Spark custom
    pool. This parameter is mandatory.

.PARAMETER DynamicExecutorAllocationMaxExecutors
    The maximum number of executors for dynamic executor allocation in the Spark custom
    pool. This parameter is mandatory.

.EXAMPLE
    New-FabricSparkCustomPool -WorkspaceId "workspace-12345" -SparkCustomPoolName "New Spark Pool" -NodeFamily "MemoryOptimized" -NodeSize "Large" -AutoScaleEnabled $true -AutoScaleMinNodeCount 1 -AutoScaleMaxNodeCount 10 -DynamicExecutorAllocationEnabled $true -DynamicExecutorAllocationMinExecutors 1 -DynamicExecutorAllocationMaxExecutors 10
    This example creates a new Spark custom pool named "New Spark Pool" in the workspace
    with ID "workspace-12345" with the specified configuration.

.NOTES
    - Requires `$FabricConfig` global configuration, including `BaseUrl` and `FabricHeaders`.
    - Calls `Test-TokenExpired` to ensure token validity before making the API request.

    Author: Tiago Balabuch
#>
function New-FabricSparkCustomPool {
    [CmdletBinding()]
    param (
        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [string]$WorkspaceId,

        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [ValidatePattern('^[a-zA-Z0-9_ ]*$')]
        [string]$SparkCustomPoolName,

        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [ValidateSet('MemoryOptimized')]
        [string]$NodeFamily,

        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [ValidateSet('Large', 'Medium', 'Small', 'XLarge', 'XXLarge')]
        [string]$NodeSize,

        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [bool]$AutoScaleEnabled,

        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [int]$AutoScaleMinNodeCount,

        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [int]$AutoScaleMaxNodeCount,

        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [bool]$DynamicExecutorAllocationEnabled,

        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [int]$DynamicExecutorAllocationMinExecutors,

        [Parameter(Mandatory = $true)]
        [ValidateNotNullOrEmpty()]
        [int]$DynamicExecutorAllocationMaxExecutors
    )

    try {
        # Step 1: Ensure token validity before touching the API.
        Write-Message -Message "Validating token..." -Level Debug
        Test-TokenExpired
        Write-Message -Message "Token validation completed." -Level Debug

        # Step 2: Construct the API URL.
        $apiEndpointUrl = "{0}/workspaces/{1}/spark/pools" -f $FabricConfig.BaseUrl, $WorkspaceId
        Write-Message -Message "API Endpoint: $apiEndpointUrl" -Level Debug

        # Step 3: Construct the request body expected by the Fabric pools endpoint.
        $body = @{
            name                      = $SparkCustomPoolName
            nodeFamily                = $NodeFamily
            nodeSize                  = $NodeSize
            autoScale                 = @{
                enabled      = $AutoScaleEnabled
                minNodeCount = $AutoScaleMinNodeCount
                maxNodeCount = $AutoScaleMaxNodeCount
            }
            dynamicExecutorAllocation = @{
                enabled      = $DynamicExecutorAllocationEnabled
                minExecutors = $DynamicExecutorAllocationMinExecutors
                maxExecutors = $DynamicExecutorAllocationMaxExecutors
            }
        }
        $bodyJson = $body | ConvertTo-Json -Depth 10
        Write-Message -Message "Request Body: $bodyJson" -Level Debug

        # Step 4: Make the API request. -SkipHttpErrorCheck lets us branch on the
        # status code ourselves instead of throwing on non-2xx responses.
        $response = Invoke-RestMethod `
            -Headers $FabricConfig.FabricHeaders `
            -Uri $apiEndpointUrl `
            -Method Post `
            -Body $bodyJson `
            -ContentType "application/json" `
            -ErrorAction Stop `
            -SkipHttpErrorCheck `
            -ResponseHeadersVariable "responseHeader" `
            -StatusCodeVariable "statusCode"

        # Step 5: Handle and log the response.
        switch ($statusCode) {
            201 {
                # Pool created synchronously.
                Write-Message -Message "SparkCustomPool '$SparkCustomPoolName' created successfully!" -Level Info
                return $response
            }
            202 {
                # Creation accepted; poll the long-running operation to completion.
                Write-Message -Message "SparkCustomPool '$SparkCustomPoolName' creation accepted. Provisioning in progress!" -Level Info
                [string]$operationId = $responseHeader["x-ms-operation-id"]
                [string]$location = $responseHeader["Location"]
                [string]$retryAfter = $responseHeader["Retry-After"]
                Write-Message -Message "Operation ID: '$operationId'" -Level Debug
                Write-Message -Message "Location: '$location'" -Level Debug
                Write-Message -Message "Retry-After: '$retryAfter'" -Level Debug
                Write-Message -Message "Getting Long Running Operation status" -Level Debug
                $operationStatus = Get-FabricLongRunningOperation -operationId $operationId
                Write-Message -Message "Long Running Operation status: $operationStatus" -Level Debug
                # Handle operation result
                if ($operationStatus.status -eq "Succeeded") {
                    Write-Message -Message "Operation Succeeded" -Level Debug
                    Write-Message -Message "Getting Long Running Operation result" -Level Debug
                    # NOTE: no comma between arguments — a comma would bind an array
                    # to -operationId and break the call.
                    $operationResult = Get-FabricLongRunningOperationResult -operationId $operationId -location $location
                    Write-Message -Message "Long Running Operation result: $operationResult" -Level Debug
                    return $operationResult
                }
                else {
                    Write-Message -Message "Operation failed. Status: $($operationStatus)" -Level Debug
                    Write-Message -Message "Operation failed. Status: $($operationStatus)" -Level Error
                    return $operationStatus
                }
            }
            default {
                # Unexpected status code: surface the API error payload, then throw.
                Write-Message -Message "Unexpected response code: $statusCode from the API." -Level Error
                Write-Message -Message "Error: $($response.message)" -Level Error
                Write-Message -Message "Error Details: $($response.moreDetails)" -Level Error
                Write-Message -Message "Error Code: $($response.errorCode)" -Level Error
                throw "API request failed with status code $statusCode."
            }
        }
    }
    catch {
        # Step 6: Handle and log errors without rethrowing.
        $errorDetails = $_.Exception.Message
        Write-Message -Message "Failed to create SparkCustomPool. Error: $errorDetails" -Level Error
    }
}