fix: README
parent 502b8cefc1
commit afa90e9839

1 changed file with 158 additions and 158 deletions

README.md (316 changed lines)

@@ -26,208 +26,208 @@ zig build
<details>
<summary>Python</summary>

```python
from typing import Union

from fastapi import FastAPI

app = FastAPI()


@app.get("/")
def read_root():
    return {"Hello": "World"}


@app.get("/items/{item_id}")
def read_item(item_id: int, q: Union[str, None] = None):
    return {"item_id": item_id, "q": q}
```

Result:

```
File: test.py
Language: python

var app = FastAPI();
func read_root() -> void;
func read_item() -> void;
```

</details>

<details>
<summary>Zig</summary>

```zig
const std = @import("std");

const root = @import("root.zig");
const E = root.E;
const P3 = root.P3;
const Vec3 = root.Vec3;
const Sphere = root.Sphere;
const Ray = root.Ray;
const Interval = root.Interval;

pub const Hittable = union(enum) {
    const Self = @This();
    sphere: Sphere,

    pub fn initSphere(center: [3]E, radius: E) Self {
        return .{ .sphere = Sphere.init(center, radius) };
    }

    pub fn deinit(self: Self) void {
        switch (self) {
            inline else => |hittable| hittable.deinit(),
        }
    }

    pub const Collision = struct {
        const Inner = @This();
        pub const Face = enum { front, back };
        t: E,
        p: P3,
        normal: Vec3,
        face: Face,
    };

    pub fn collisionAt(self: Self, interval: Interval, ray: *const Ray) ?Collision {
        switch (self) {
            inline else => |hittable| return hittable.collisionAt(interval, ray),
        }
    }
};
```

Result:

```
File: /Users/bogdanbuduroiu/development/bruvduroiu/raytracing.zig/src/hittable.zig
Language: zig

var t;
var p;
var normal;
var face;
```

</details>

<details>
<summary>Go</summary>

```go
package batch_sliding_window

import (
    "fmt"
    "go.temporal.io/sdk/temporal"
    "go.temporal.io/sdk/workflow"
    "time"
)

// ProcessBatchWorkflowInput input of the ProcessBatchWorkflow.
// A single input structure is preferred to multiple workflow arguments to simplify backward compatible API changes.
type ProcessBatchWorkflowInput struct {
    PageSize          int // Number of children started by a single sliding window workflow run
    SlidingWindowSize int // Maximum number of children to run in parallel.
    Partitions        int // How many sliding windows to run in parallel.
}

// ProcessBatchWorkflow sample Partitions the data set into continuous ranges.
// A real application can choose any other way to divide the records into multiple collections.
func ProcessBatchWorkflow(ctx workflow.Context, input ProcessBatchWorkflowInput) (processed int, err error) {
    ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
        StartToCloseTimeout: 5 * time.Second,
    })

    var recordLoader *RecordLoader // RecordLoader activity reference
    var recordCount int
    err = workflow.ExecuteActivity(ctx, recordLoader.GetRecordCount).Get(ctx, &recordCount)
    if err != nil {
        return 0, err
    }
    if input.SlidingWindowSize < input.Partitions {
        return 0, temporal.NewApplicationError(
            "SlidingWindowSize cannot be less than number of partitions", "invalidInput")
    }
    partitions := divideIntoPartitions(recordCount, input.Partitions)
    windowSizes := divideIntoPartitions(input.SlidingWindowSize, input.Partitions)

    workflow.GetLogger(ctx).Info("ProcessBatchWorkflow",
        "input", input,
        "recordCount", recordCount,
        "partitions", partitions,
        "windowSizes", windowSizes)

    var results []workflow.ChildWorkflowFuture
    offset := 0
    for i := 0; i < input.Partitions; i++ {
        // Makes child id more user-friendly
        childId := fmt.Sprintf("%s/%d", workflow.GetInfo(ctx).WorkflowExecution.ID, i)
        childCtx := workflow.WithChildOptions(ctx, workflow.ChildWorkflowOptions{WorkflowID: childId})
        // Define partition boundaries.
        maximumPartitionOffset := offset + partitions[i]
        if maximumPartitionOffset > recordCount {
            maximumPartitionOffset = recordCount
        }
        input := SlidingWindowWorkflowInput{
            PageSize:          input.PageSize,
            SlidingWindowSize: windowSizes[i],
            Offset:            offset,                 // inclusive
            MaximumOffset:     maximumPartitionOffset, // exclusive
        }
        child := workflow.ExecuteChildWorkflow(childCtx, SlidingWindowWorkflow, input)
        results = append(results, child)
        offset += partitions[i]
    }
    // Waits for all child workflows to complete
    result := 0
    for _, partitionResult := range results {
        var r int
        err := partitionResult.Get(ctx, &r) // blocks until the child completion
        if err != nil {
            return 0, err
        }
        result += r
    }
    return result, nil
}

func divideIntoPartitions(number int, n int) []int {
    base := number / n
    remainder := number % n
    partitions := make([]int, n)
    for i := 0; i < n; i++ {
        partitions[i] = base
    }
    for i := 0; i < remainder; i++ {
        partitions[i] += 1
    }
    return partitions
}
```

Result:

```
File: /Users/bogdanbuduroiu/development/temporalio/samples-go/batch-sliding-window/batch_workflow.go
Language: go

class ProcessBatchWorkflowInput {
};
var PageSize;
var SlidingWindowSize;
var Partitions;
func ProcessBatchWorkflow() -> void;
func divideIntoPartitions() -> void;
```

</details>
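
In the Go sample above, `divideIntoPartitions` is what spreads the record count (and the sliding-window budget) across partitions: every partition gets `number / n` items, and the first `number % n` partitions absorb one extra. A minimal, standalone sketch of that behaviour with made-up values (10 records, 3 partitions; not part of the Temporal sample or of this project):

```go
package main

import "fmt"

// Copy of divideIntoPartitions from the sample above: each partition gets
// number/n items, and the first number%n partitions receive one extra item.
func divideIntoPartitions(number int, n int) []int {
    base := number / n
    remainder := number % n
    partitions := make([]int, n)
    for i := 0; i < n; i++ {
        partitions[i] = base
    }
    for i := 0; i < remainder; i++ {
        partitions[i] += 1
    }
    return partitions
}

func main() {
    // Hypothetical input: 10 records split across 3 partitions.
    fmt.Println(divideIntoPartitions(10, 3)) // prints [4 3 3]
}
```

The resulting sizes are what the workflow turns into the inclusive `Offset` / exclusive `MaximumOffset` range handed to each child.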