-
Notifications
You must be signed in to change notification settings - Fork 870
Create Channels from DAG #261
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -9,6 +9,7 @@ import ( | |
| "os" | ||
| "path/filepath" | ||
| "strings" | ||
| "sync" | ||
| "time" | ||
|
|
||
| storetypes "github.com/cosmos/cosmos-sdk/store/types" | ||
|
|
@@ -30,6 +31,7 @@ import ( | |
| servertypes "github.com/cosmos/cosmos-sdk/server/types" | ||
| "github.com/cosmos/cosmos-sdk/simapp" | ||
| sdk "github.com/cosmos/cosmos-sdk/types" | ||
| sdkacltypes "github.com/cosmos/cosmos-sdk/types/accesscontrol" | ||
| "github.com/cosmos/cosmos-sdk/types/module" | ||
| "github.com/cosmos/cosmos-sdk/version" | ||
| "github.com/cosmos/cosmos-sdk/x/auth" | ||
|
|
@@ -38,6 +40,7 @@ import ( | |
| aclmodule "github.com/cosmos/cosmos-sdk/x/accesscontrol" | ||
| aclkeeper "github.com/cosmos/cosmos-sdk/x/accesscontrol/keeper" | ||
| acltypes "github.com/cosmos/cosmos-sdk/x/accesscontrol/types" | ||
|
|
||
| authrest "github.com/cosmos/cosmos-sdk/x/auth/client/rest" | ||
| authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" | ||
| authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" | ||
|
|
@@ -881,11 +884,11 @@ func (app *App) BuildDependencyDag(ctx sdk.Context, txs [][]byte) (*Dag, error) | |
| return nil, err | ||
| } | ||
| msgs := tx.GetMsgs() | ||
| for _, msg := range msgs { | ||
| for messageIndex, msg := range msgs { | ||
| msgDependencies := app.AccessControlKeeper.GetResourceDependencyMapping(ctx, acltypes.GenerateMessageKey(msg)) | ||
| for _, accessOp := range msgDependencies.AccessOps { | ||
| for _, accessOp := range msgDependencies.GetAccessOps() { | ||
| // make a new node in the dependency dag | ||
| dependencyDag.AddNodeBuildDependency(txIndex, accessOp) | ||
| dependencyDag.AddNodeBuildDependency(messageIndex, txIndex, accessOp) | ||
| } | ||
| } | ||
|
|
||
|
|
@@ -926,6 +929,115 @@ func (app *App) FinalizeBlocker(ctx sdk.Context, req *abci.RequestFinalizeBlock) | |
| return &resp, nil | ||
| } | ||
|
|
||
| func (app *App) DeliverTxWithResult(ctx sdk.Context, tx []byte) *abci.ExecTxResult { | ||
| deliverTxResp := app.DeliverTx(ctx, abci.RequestDeliverTx{ | ||
| Tx: tx, | ||
| }) | ||
| return &abci.ExecTxResult{ | ||
| Code: deliverTxResp.Code, | ||
| Data: deliverTxResp.Data, | ||
| Log: deliverTxResp.Log, | ||
| Info: deliverTxResp.Info, | ||
| GasWanted: deliverTxResp.GasWanted, | ||
| GasUsed: deliverTxResp.GasUsed, | ||
| Events: deliverTxResp.Events, | ||
| Codespace: deliverTxResp.Codespace, | ||
| } | ||
| } | ||
|
|
||
| func (app *App) ProcessBlockSynchronous(ctx sdk.Context, txs [][]byte) []*abci.ExecTxResult { | ||
| txResults := []*abci.ExecTxResult{} | ||
| for _, tx := range txs { | ||
| txResults = append(txResults, app.DeliverTxWithResult(ctx, tx)) | ||
| } | ||
| return txResults | ||
| } | ||
|
|
||
| // Returns a mapping of the accessOperation to the channels | ||
| func getChannelsFromSignalMapping(signalMapping MessageCompletionSignalMapping) sdkacltypes.MessageAccessOpsChannelMapping { | ||
| channelsMapping := make(sdkacltypes.MessageAccessOpsChannelMapping) | ||
| for messageIndex, accessOperationsToSignal := range signalMapping { | ||
| for accessOperation, completionSignals := range accessOperationsToSignal { | ||
| var channels []chan interface{} | ||
| for _, completionSignal := range completionSignals { | ||
| channels = append(channels, completionSignal.Channel) | ||
| } | ||
| channelsMapping[messageIndex][accessOperation] = channels | ||
| } | ||
| } | ||
| return channelsMapping | ||
| } | ||
|
|
||
| type ChannelResult struct { | ||
| txIndex int | ||
| result *abci.ExecTxResult | ||
| } | ||
|
|
||
| func (app *App) ProcessTxConcurrent( | ||
| ctx sdk.Context, | ||
| txIndex int, | ||
| txBytes []byte, | ||
| wg *sync.WaitGroup, | ||
| resultChan chan<- ChannelResult, | ||
| txCompletionSignalingMap MessageCompletionSignalMapping, | ||
| txBlockingSignalsMap MessageCompletionSignalMapping, | ||
| ) { | ||
| defer wg.Done() | ||
| // Store the Channels in the Context Object for each transaction | ||
| ctx.WithTxBlockingChannels(getChannelsFromSignalMapping(txBlockingSignalsMap)) | ||
| ctx.WithTxCompletionChannels(getChannelsFromSignalMapping(txCompletionSignalingMap)) | ||
|
|
||
| // Deliver the transaction and store the result in the channel | ||
| resultChan <- ChannelResult{txIndex, app.DeliverTxWithResult(ctx, txBytes)} | ||
| } | ||
|
|
||
| func (app *App) ProcessBlockConcurrent( | ||
| ctx sdk.Context, | ||
| txs [][]byte, | ||
| completionSignalingMap map[int]MessageCompletionSignalMapping, | ||
| blockingSignalsMap map[int]MessageCompletionSignalMapping, | ||
| ) []*abci.ExecTxResult { | ||
| var waitGroup sync.WaitGroup | ||
| resultChan := make(chan ChannelResult) | ||
| txResults := []*abci.ExecTxResult{} | ||
|
|
||
| // If there's no transactions then return empty results | ||
| if len(txs) == 0 { | ||
| return txResults | ||
| } | ||
|
|
||
| // For each transaction, start goroutine and deliver TX | ||
| for txIndex, txBytes := range txs { | ||
| waitGroup.Add(1) | ||
| go app.ProcessTxConcurrent( | ||
| ctx, | ||
| txIndex, | ||
| txBytes, | ||
| &waitGroup, | ||
| resultChan, | ||
| completionSignalingMap[txIndex], | ||
| blockingSignalsMap[txIndex], | ||
| ) | ||
| } | ||
|
|
||
| // Waits for all the transactions to complete | ||
| waitGroup.Wait() | ||
|
|
||
| // Gather Results and store it based on txIndex | ||
| // Concurrent results may be in different order than the original txIndex | ||
| txResultsMap := map[int]*abci.ExecTxResult{} | ||
| for result := range resultChan { | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This isn't necessarily going to be in the same order as the txs that were provided, right? Is that safe, or should we also include the tx index with the results so we can sort into tx index order prior to returning the tx results?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, this will be in the order that it's completed in — let me double-check if the ordering matters |
||
| txResultsMap[result.txIndex] = result.result | ||
| } | ||
|
|
||
| // Gather Results and store in array based on txIndex to preserve ordering | ||
| for txIndex := range txs { | ||
| txResults = append(txResults, txResultsMap[txIndex]) | ||
| } | ||
|
|
||
| return txResults | ||
| } | ||
|
|
||
| func (app *App) ProcessBlock(ctx sdk.Context, txs [][]byte, req BlockProcessRequest, lastCommit abci.CommitInfo) ([]abci.Event, []*abci.ExecTxResult, abci.ResponseEndBlock, error) { | ||
| goCtx := app.decorateContextWithDexMemState(ctx.Context()) | ||
| ctx = ctx.WithContext(goCtx) | ||
|
|
@@ -967,30 +1079,18 @@ func (app *App) ProcessBlock(ctx sdk.Context, txs [][]byte, req BlockProcessRequ | |
| // } | ||
| // app.batchVerifier.VerifyTxs(ctx, typedTxs) | ||
|
|
||
| dag, err := app.BuildDependencyDag(ctx, txs) | ||
| dependencyDag, err := app.BuildDependencyDag(ctx, txs) | ||
| txResults := []*abci.ExecTxResult{} | ||
|
|
||
| // TODO:: add metrics for async vs sync | ||
| if err != nil { | ||
| // something went wrong in dag, process txs sequentially | ||
| for _, tx := range txs { | ||
| // ctx = ctx.WithContext(context.WithValue(ctx.Context(), ante.ContextKeyTxIndexKey, i)) | ||
| deliverTxResp := app.DeliverTx(ctx, abci.RequestDeliverTx{ | ||
| Tx: tx, | ||
| }) | ||
| txResults = append(txResults, &abci.ExecTxResult{ | ||
| Code: deliverTxResp.Code, | ||
| Data: deliverTxResp.Data, | ||
| Log: deliverTxResp.Log, | ||
| Info: deliverTxResp.Info, | ||
| GasWanted: deliverTxResp.GasWanted, | ||
| GasUsed: deliverTxResp.GasUsed, | ||
| Events: deliverTxResp.Events, | ||
| Codespace: deliverTxResp.Codespace, | ||
| }) | ||
| ctx.Logger().Error(fmt.Sprintf("Error while building DAG, processing synchronously: %s", err)) | ||
| if err == ErrCycleInDAG { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. would be good to add metrics over how many times we process synchronously vs concurrently
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'll add a todo here, looks like we don't have the telemetry lib from master. It would be easier to do once we merge this into master |
||
| txResults = app.ProcessBlockSynchronous(ctx, txs) | ||
| } | ||
| } else { | ||
| // no error, lets process txs concurrently | ||
| _, _ = dag.BuildCompletionSignalMaps() | ||
| // TODO: create channel map here | ||
| completionSignalingMap, blockingSignalsMap := dependencyDag.BuildCompletionSignalMaps() | ||
| txResults = app.ProcessBlockConcurrent(ctx, txs, completionSignalingMap, blockingSignalsMap) | ||
| } | ||
|
|
||
| endBlockResp := app.EndBlock(ctx, abci.RequestEndBlock{ | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -3,13 +3,14 @@ package app | |
| import ( | ||
| "fmt" | ||
|
|
||
| acltypes "github.com/cosmos/cosmos-sdk/x/accesscontrol/types" | ||
| acltypes "github.com/cosmos/cosmos-sdk/types/accesscontrol" | ||
| ) | ||
|
|
||
| type DagNodeID int | ||
|
|
||
| type DagNode struct { | ||
| NodeID DagNodeID | ||
| MessageIndex int | ||
| TxIndex int | ||
| AccessOperation acltypes.AccessOperation | ||
| } | ||
|
|
@@ -27,6 +28,17 @@ type Dag struct { | |
| NextID DagNodeID | ||
| } | ||
|
|
||
| // Alias for mapping MessageIndexId -> AccessOperations -> CompletionSignals | ||
| type MessageCompletionSignalMapping = map[int]map[acltypes.AccessOperation][]CompletionSignal | ||
|
|
||
| type CompletionSignal struct { | ||
| FromNodeID DagNodeID | ||
| ToNodeID DagNodeID | ||
| CompletionAccessOperation acltypes.AccessOperation // this is the access operation that must complete in order to send the signal | ||
| BlockedAccessOperation acltypes.AccessOperation // this is the access operation that is blocked by the completion access operation | ||
| Channel chan interface{} | ||
| } | ||
|
|
||
| func (dag *Dag) GetCompletionSignal(edge DagEdge) *CompletionSignal { | ||
| // only if tx indexes are different | ||
| fromNode := dag.NodeMap[edge.FromNodeID] | ||
|
|
@@ -39,6 +51,8 @@ func (dag *Dag) GetCompletionSignal(edge DagEdge) *CompletionSignal { | |
| ToNodeID: toNode.NodeID, | ||
| CompletionAccessOperation: fromNode.AccessOperation, | ||
| BlockedAccessOperation: toNode.AccessOperation, | ||
| // channel used for signalling | ||
| Channel: make(chan interface{}), | ||
| } | ||
| } | ||
|
|
||
|
|
@@ -68,9 +82,10 @@ func NewDag() Dag { | |
| } | ||
| } | ||
|
|
||
| func (dag *Dag) AddNode(txIndex int, accessOp acltypes.AccessOperation) DagNode { | ||
| func (dag *Dag) AddNode(messageIndex int, txIndex int, accessOp acltypes.AccessOperation) DagNode { | ||
| dagNode := DagNode{ | ||
| NodeID: dag.NextID, | ||
| MessageIndex: messageIndex, | ||
| TxIndex: txIndex, | ||
| AccessOperation: accessOp, | ||
| } | ||
|
|
@@ -101,8 +116,8 @@ func (dag *Dag) AddEdge(fromIndex DagNodeID, toIndex DagNodeID) *DagEdge { | |
| // that will allow the dependent goroutines to coordinate execution safely. | ||
| // | ||
| // It will also register the new node with AccessOpsMap so that future nodes that may be dependent on this one can properly identify the dependency. | ||
| func (dag *Dag) AddNodeBuildDependency(txIndex int, accessOp acltypes.AccessOperation) { | ||
| dagNode := dag.AddNode(txIndex, accessOp) | ||
| func (dag *Dag) AddNodeBuildDependency(messageIndex int, txIndex int, accessOp acltypes.AccessOperation) { | ||
| dagNode := dag.AddNode(messageIndex, txIndex, accessOp) | ||
| // if in TxIndexMap, make an edge from the previous node index | ||
| if lastTxNodeID, ok := dag.TxIndexMap[txIndex]; ok { | ||
| // TODO: we actually don't necessarily need these edges, but keeping for now so we can first determine that cycles can't be missed if we remove these | ||
|
|
@@ -186,7 +201,10 @@ func (dag *Dag) GetNodeDependencies(node DagNode) (nodeDependencies []DagNode) { | |
| } | ||
|
|
||
| // returns completion signaling map and blocking signals map | ||
| func (dag *Dag) BuildCompletionSignalMaps() (completionSignalingMap map[int]map[acltypes.AccessOperation][]CompletionSignal, blockingSignalsMap map[int]map[acltypes.AccessOperation][]CompletionSignal) { | ||
| func (dag *Dag) BuildCompletionSignalMaps() ( | ||
| completionSignalingMap map[int]MessageCompletionSignalMapping, | ||
| blockingSignalsMap map[int]MessageCompletionSignalMapping, | ||
| ) { | ||
| // go through every node | ||
| for _, node := range dag.NodeMap { | ||
| // for each node, assign its completion signaling, and also assign blocking signals for the destination nodes | ||
|
|
@@ -195,11 +213,15 @@ func (dag *Dag) BuildCompletionSignalMaps() (completionSignalingMap map[int]map[ | |
| maybeCompletionSignal := dag.GetCompletionSignal(edge) | ||
| if maybeCompletionSignal != nil { | ||
| completionSignal := *maybeCompletionSignal | ||
|
|
||
| // add it to the right blocking signal in the right txindex | ||
| toNode := dag.NodeMap[edge.ToNodeID] | ||
| blockingSignalsMap[toNode.TxIndex][completionSignal.BlockedAccessOperation] = append(blockingSignalsMap[toNode.TxIndex][completionSignal.BlockedAccessOperation], completionSignal) | ||
| prevBlockSignalMapping := blockingSignalsMap[toNode.TxIndex][toNode.MessageIndex][completionSignal.BlockedAccessOperation] | ||
| blockingSignalsMap[toNode.TxIndex][toNode.MessageIndex][completionSignal.BlockedAccessOperation] = append(prevBlockSignalMapping, completionSignal) | ||
|
|
||
| // add it to the completion signal for the tx index | ||
| completionSignalingMap[node.TxIndex][completionSignal.CompletionAccessOperation] = append(completionSignalingMap[node.TxIndex][completionSignal.CompletionAccessOperation], completionSignal) | ||
| prevCompletionSignalMapping := completionSignalingMap[node.TxIndex][node.MessageIndex][completionSignal.CompletionAccessOperation] | ||
| completionSignalingMap[node.TxIndex][node.MessageIndex][completionSignal.CompletionAccessOperation] = append(prevCompletionSignalMapping, completionSignal) | ||
| } | ||
|
|
||
| } | ||
|
|
@@ -208,11 +230,4 @@ func (dag *Dag) BuildCompletionSignalMaps() (completionSignalingMap map[int]map[ | |
| return | ||
| } | ||
|
|
||
| type CompletionSignal struct { | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Moving this up with all the other types |
||
| FromNodeID DagNodeID | ||
| ToNodeID DagNodeID | ||
| CompletionAccessOperation acltypes.AccessOperation // this is the access operation that must complete in order to send the signal | ||
| BlockedAccessOperation acltypes.AccessOperation // this is the access operation that is blocked by the completion access operation | ||
| } | ||
|
|
||
| var ErrCycleInDAG = fmt.Errorf("cycle detected in DAG") | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
can we add TODOs to block on blocking channels and writing to completion channels to properly block and release resources
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
added here: https://github.com/sei-protocol/sei-chain/pull/261/files#diff-0f1d2976054440336a576d47a44a37b80cdf6701dd9113012bce0e3c425819b7R968