Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
146 changes: 72 additions & 74 deletions vsintegration/src/FSharp.Editor/Common/CommonHelpers.fs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

namespace Microsoft.VisualStudio.FSharp.Editor

#nowarn "1182"

open System
open System.Collections.Generic
open System.Threading
Expand Down Expand Up @@ -73,13 +75,6 @@ module internal CommonHelpers =
data.[i] <- None
i <- i + 1

/// Walk backwards from startLine and return the index of the nearest line whose
/// cached scan data is still valid for the current text (0 when none below is valid).
member x.GetLastValidCachedLine (startLine: int, sourceLines: TextLineCollection) : int =
    // A cached entry is reusable only if it exists and still matches the source line.
    let isStillValid lineIndex =
        match x.[lineIndex] with
        | Some data -> data.IsValid(sourceLines.[lineIndex])
        | None -> false
    let rec walkBack i =
        if i > 0 && not (isStillValid i) then walkBack (i - 1) else i
    walkBack startLine

let private dataCache = ConditionalWeakTable<DocumentId, SourceTextData>()

let internal compilerTokenToRoslynToken(colorKind: FSharpTokenColorKind) : string =
Expand Down Expand Up @@ -133,72 +128,66 @@ module internal CommonHelpers =

SourceLineData(textLine.Start, lexState, previousLexState.Value, lineContents.GetHashCode(), classifiedSpans, List.ofSeq tokens)

/// Tokenizes the document from the last valid cached line up to endLine, reusing
/// per-document cached line data where possible, and returns the SourceLineData
/// for the requested range [startLine..endLine].
let private getSourceLineDatas(documentKey: DocumentId, sourceText: SourceText, startLine: int, endLine: int, fileName: string option, defines: string list,
                               cancellationToken: CancellationToken) : ResizeArray<SourceLineData> =
    let sourceTokenizer = FSharpSourceTokenizer(defines, fileName)
    let lines = sourceText.Lines
    // We keep incremental data per-document. When text changes we correlate text line-by-line (by hash codes of lines).
    let sourceTextData = dataCache.GetValue(documentKey, fun key -> SourceTextData(lines.Count))
    let scanStartLine = sourceTextData.GetLastValidCachedLine(startLine, lines)

    // Rescan the lines if necessary and report the information.
    let result = ResizeArray()
    // BUGFIX: the scan starts *at* scanStartLine, so the seed lex state must be the state
    // at the *start* of that line, not at its end. Seeding with LexStateAtEndOfLine made
    // the reuse check below always fail for scanStartLine and re-lexed it with a
    // one-line-late state. When scanStartLine > 0, GetLastValidCachedLine guarantees
    // sourceTextData.[scanStartLine] is Some, so .Value is safe here.
    let mutable lexState = if scanStartLine = 0 then 0L else sourceTextData.[scanStartLine].Value.LexStateAtStartOfLine

    for i = scanStartLine to endLine do
        cancellationToken.ThrowIfCancellationRequested()
        let textLine = lines.[i]
        let lineContents = textLine.Text.ToString(textLine.Span)

        let lineData =
            // We can reuse the old data when
            // 1. the line starts at the same overall position
            // 2. the hash codes match
            // 3. the start-of-line lex states are the same
            match sourceTextData.[i] with
            | Some data when data.IsValid(textLine) && data.LexStateAtStartOfLine = lexState ->
                data
            | _ ->
                // Otherwise, we recompute
                let newData = scanSourceLine(sourceTokenizer, textLine, lineContents, lexState)
                sourceTextData.[i] <- Some newData
                newData

        lexState <- lineData.LexStateAtEndOfLine

        // Lines before startLine are rescanned only to re-establish the lex state;
        // only the requested range is returned.
        if startLine <= i then
            result.Add(lineData)

    // If the line following endLine was previously scanned with a different incoming
    // lex state, its cached data (and everything after it) is now stale — clear it so
    // later requests rescan from here.
    if endLine < lines.Count - 1 then
        match sourceTextData.[endLine+1] with
        | Some data ->
            if data.LexStateAtStartOfLine <> lexState then
                sourceTextData.ClearFrom (endLine+1)
        | None -> ()

    result

/// Returns the Roslyn classified spans for the portion of the document covered by textSpan.
/// Never throws (other than cancellation): unexpected failures are reported via Assert and
/// an empty list is returned.
let getColorizationData(documentKey: DocumentId, sourceText: SourceText, textSpan: TextSpan, fileName: string option, defines: string list,
                        cancellationToken: CancellationToken) : List<ClassifiedSpan> =
    try
        let lines = sourceText.Lines
        let startLine = lines.GetLineFromPosition(textSpan.Start).LineNumber
        let endLine = lines.GetLineFromPosition(textSpan.End).LineNumber

        // A token belongs to the result when either of its endpoints falls inside the
        // requested span, or when it fully encloses the requested span.
        let overlapsRequest (token: ClassifiedSpan) =
            textSpan.Contains(token.TextSpan.Start) ||
            textSpan.Contains(token.TextSpan.End - 1) ||
            (token.TextSpan.Start <= textSpan.Start && textSpan.End <= token.TextSpan.End)

        let result = new List<ClassifiedSpan>()
        getSourceLineDatas(documentKey, sourceText, startLine, endLine, fileName, defines, cancellationToken)
        |> Seq.iter (fun lineData ->
            result.AddRange(lineData.ClassifiedSpans |> Seq.filter overlapsRequest))
        result
    with
    | :? System.OperationCanceledException -> reraise()
    | ex ->
        Assert.Exception(ex)
        List<ClassifiedSpan>()
try
let sourceTokenizer = FSharpSourceTokenizer(defines, fileName)
let lines = sourceText.Lines
// We keep incremental data per-document. When text changes we correlate text line-by-line (by hash codes of lines)
let sourceTextData = dataCache.GetValue(documentKey, fun key -> SourceTextData(lines.Count))

let startLine = lines.GetLineFromPosition(textSpan.Start).LineNumber
let endLine = lines.GetLineFromPosition(textSpan.End).LineNumber
// Go backwards to find the last cached scanned line that is valid
let scanStartLine =
let mutable i = startLine
while i > 0 && (match sourceTextData.[i] with Some data -> not (data.IsValid(lines.[i])) | None -> true) do
i <- i - 1
i
// Rescan the lines if necessary and report the information
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

scanStartLine made my head explode — or at least its while-loop condition did.
I don't know whether this is any more readable, though:

             let scanStartLine =
                 let rec scanStart i =
                     if i > 0 && (Option.isNone (sourceTextData.[i]) || not (sourceTextData.[i].Value.IsValid(lines.[i]))) then scanStart (i - 1)
                     else i
                 scanStart startLine

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm 100% with you on this. It must be covered by tests, then rewritten in a more functional and safe manner. The number of things this algorithm operates on is not so large as to justify code this imperative.

let result = new List<ClassifiedSpan>()
let mutable lexState = if scanStartLine = 0 then 0L else sourceTextData.[scanStartLine - 1].Value.LexStateAtEndOfLine

for i = scanStartLine to endLine do
cancellationToken.ThrowIfCancellationRequested()
let textLine = lines.[i]
let lineContents = textLine.Text.ToString(textLine.Span)

let lineData =
// We can reuse the old data when
// 1. the line starts at the same overall position
// 2. the hash codes match
// 3. the start-of-line lex states are the same
match sourceTextData.[i] with
| Some data when data.IsValid(textLine) && data.LexStateAtStartOfLine = lexState ->
data
| _ ->
// Otherwise, we recompute
let newData = scanSourceLine(sourceTokenizer, textLine, lineContents, lexState)
sourceTextData.[i] <- Some newData
newData

lexState <- lineData.LexStateAtEndOfLine

if startLine <= i then
result.AddRange(lineData.ClassifiedSpans |> Seq.filter(fun token ->
textSpan.Contains(token.TextSpan.Start) ||
textSpan.Contains(token.TextSpan.End - 1) ||
(token.TextSpan.Start <= textSpan.Start && textSpan.End <= token.TextSpan.End)))

// If necessary, invalidate all subsequent lines after endLine
if endLine < lines.Count - 1 then
match sourceTextData.[endLine+1] with
| Some data ->
if data.LexStateAtStartOfLine <> lexState then
sourceTextData.ClearFrom (endLine+1)
| None -> ()
result
with
| :? System.OperationCanceledException -> reraise()
| ex ->
Assert.Exception(ex)
List<ClassifiedSpan>()

type private DraftToken =
{ Kind: LexerSymbolKind
Expand Down Expand Up @@ -328,14 +317,22 @@ module internal CommonHelpers =
let private getCachedSourceLineData(documentKey: DocumentId, sourceText: SourceText, position: int, fileName: string, defines: string list) =
let textLine = sourceText.Lines.GetLineFromPosition(position)
let textLinePos = sourceText.Lines.GetLinePosition(position)
let lineNumber = textLinePos.Line
let lineNumber = textLinePos.Line + 1 // FCS line number
let sourceTokenizer = FSharpSourceTokenizer(defines, Some fileName)
let lines = sourceText.Lines
// We keep incremental data per-document. When text changes we correlate text line-by-line (by hash codes of lines)
let sourceTextData = dataCache.GetValue(documentKey, fun key -> SourceTextData(lines.Count))
// Go backwards to find the last cached scanned line that is valid
let scanStartLine = sourceTextData.GetLastValidCachedLine(lineNumber, lines)
let lexState = if scanStartLine = 0 then 0L else sourceTextData.[scanStartLine].Value.LexStateAtEndOfLine
let scanStartLine =
let mutable i = min (lines.Count - 1) lineNumber
while i > 0 &&
(match sourceTextData.[i] with
| Some data -> not (data.IsValid(lines.[i]))
| None -> true
) do
i <- i - 1
i
let lexState = if scanStartLine = 0 then 0L else sourceTextData.[scanStartLine - 1].Value.LexStateAtEndOfLine
let lineContents = textLine.Text.ToString(textLine.Span)

// We can reuse the old data when
Expand Down Expand Up @@ -363,6 +360,7 @@ module internal CommonHelpers =
let getSymbolAtPosition(documentKey: DocumentId, sourceText: SourceText, position: int, fileName: string, defines: string list, lookupKind: SymbolLookupKind) : LexerSymbol option =
try
let lineData, textLinePos, lineContents = getCachedSourceLineData(documentKey, sourceText, position, fileName, defines)
let sourceTokenizer = FSharpSourceTokenizer(defines, Some fileName)
getSymbolFromTokens(fileName, lineData.Tokens, textLinePos, lineContents, lookupKind)
with
| :? System.OperationCanceledException -> reraise()
Expand Down
2 changes: 1 addition & 1 deletion vsintegration/src/FSharp.Editor/FSharp.Editor.fsproj
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,8 @@
<Compile Include="Common\Pervasive.fs" />
<Compile Include="Common\CommonConstants.fs" />
<Compile Include="Common\CommonRoslynHelpers.fs" />
<Compile Include="Common\CommonHelpers.fs" />
<Compile Include="Common\Logging.fs" />
<Compile Include="Common\CommonHelpers.fs" />
<Compile Include="Common\ContentType.fs" />
<Compile Include="Common\LanguageService.fs" />
<Compile Include="Common\SymbolHelpers.fs" />
Expand Down