diff --git a/.github/labeler.yml b/.github/labeler.yml
new file mode 100644
index 0000000000000..9359bd0ca70a6
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,3 @@
+# See https://github.com/actions/labeler
+port-to-master: '**'
+port-to-v1.10: '**'
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000000000..7ed825781c53b
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,14 @@
+
+## PR Description
+
+_What does this PR do?_
+
+## Checklist
+
+Requirements for merging:
+- [ ] I have opened an issue or PR upstream on JuliaLang/julia:
+- [ ] I have removed the `port-to-*` labels that don't apply.
+- [ ] I have opened a PR on raicode to test these changes:
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
new file mode 100644
index 0000000000000..2141a906e96cd
--- /dev/null
+++ b/.github/workflows/labeler.yml
@@ -0,0 +1,17 @@
+# See https://github.com/actions/labeler
+name: "Pull Request Labeler"
+on:
+ pull_request_target:
+ types:
+ - opened
+
+jobs:
+ triage:
+ permissions:
+ contents: read
+ pull-requests: write
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/labeler@v4
+ with:
+ dot: true
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000000000..3df2093491753
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,16 @@
+name: "Close stale PRs"
+on:
+ schedule:
+ - cron: "0 0 * * *" # every night at midnight
+
+jobs:
+ stale:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v8
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-pr-message: 'This PR is stale because it has been open 30 days with no activity. Comment or remove stale label, or this PR will be closed in 5 days.'
+ days-before-stale: 30
+ days-before-close: 5
+ stale-pr-label: 'stale'
diff --git a/.github/workflows/update-upstream-branches.yml b/.github/workflows/update-upstream-branches.yml
new file mode 100644
index 0000000000000..247000bbd42cd
--- /dev/null
+++ b/.github/workflows/update-upstream-branches.yml
@@ -0,0 +1,28 @@
+name: "Update upstream branches"
+on:
+ schedule:
+ - cron: "0 0 * * *" # every night at midnight
+ workflow_dispatch:
+
+jobs:
+ PullUpstream:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false # run all jobs in the matrix even if one fails
+ matrix:
+ branch:
+ - "master"
+ - "backports-release-1.10"
+ steps:
+ - name: Checkout RAI/julia
+ uses: actions/checkout@v3
+ with:
+ ref: ${{ matrix.branch }}
+ - name: Update ${{ matrix.branch }}
+ run: |
+ git config --global user.email "julia-engineering@relational.ai"
+ git config --global user.name "RAI CI (GitHub Action Automation)"
+
+ git remote add upstream https://github.com/JuliaLang/julia
+ git pull upstream ${{ matrix.branch }}
+ git push origin ${{ matrix.branch }}
diff --git a/Compiler/extras/CompilerDevTools/src/CompilerDevTools.jl b/Compiler/extras/CompilerDevTools/src/CompilerDevTools.jl
index 5d0df5ccaa4e4..dd32564d7fa8d 100644
--- a/Compiler/extras/CompilerDevTools/src/CompilerDevTools.jl
+++ b/Compiler/extras/CompilerDevTools/src/CompilerDevTools.jl
@@ -9,12 +9,13 @@ struct SplitCacheInterp <: Compiler.AbstractInterpreter
inf_params::Compiler.InferenceParams
opt_params::Compiler.OptimizationParams
inf_cache::Vector{Compiler.InferenceResult}
+ codegen_cache::IdDict{CodeInstance,CodeInfo}
function SplitCacheInterp(;
world::UInt = Base.get_world_counter(),
inf_params::Compiler.InferenceParams = Compiler.InferenceParams(),
opt_params::Compiler.OptimizationParams = Compiler.OptimizationParams(),
inf_cache::Vector{Compiler.InferenceResult} = Compiler.InferenceResult[])
- new(world, inf_params, opt_params, inf_cache)
+ new(world, inf_params, opt_params, inf_cache, IdDict{CodeInstance,CodeInfo}())
end
end
@@ -23,10 +24,11 @@ Compiler.OptimizationParams(interp::SplitCacheInterp) = interp.opt_params
Compiler.get_inference_world(interp::SplitCacheInterp) = interp.world
Compiler.get_inference_cache(interp::SplitCacheInterp) = interp.inf_cache
Compiler.cache_owner(::SplitCacheInterp) = SplitCacheOwner()
+Compiler.codegen_cache(interp::SplitCacheInterp) = interp.codegen_cache
import Core.OptimizedGenerics.CompilerPlugins: typeinf, typeinf_edge
@eval @noinline typeinf(::SplitCacheOwner, mi::MethodInstance, source_mode::UInt8) =
- Base.invoke_in_world(which(typeinf, Tuple{SplitCacheOwner, MethodInstance, UInt8}).primary_world, Compiler.typeinf_ext, SplitCacheInterp(; world=Base.tls_world_age()), mi, source_mode)
+ Base.invoke_in_world(which(typeinf, Tuple{SplitCacheOwner, MethodInstance, UInt8}).primary_world, Compiler.typeinf_ext_toplevel, SplitCacheInterp(; world=Base.tls_world_age()), mi, source_mode)
@eval @noinline function typeinf_edge(::SplitCacheOwner, mi::MethodInstance, parent_frame::Compiler.InferenceState, world::UInt, source_mode::UInt8)
# TODO: This isn't quite right, we're just sketching things for now
diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl
index 0fc7bd6e328e7..981001cb2fbe6 100644
--- a/Compiler/src/Compiler.jl
+++ b/Compiler/src/Compiler.jl
@@ -35,10 +35,6 @@ else
@eval baremodule Compiler
-# Needs to match UUID defined in Project.toml
-ccall(:jl_set_module_uuid, Cvoid, (Any, NTuple{2, UInt64}), Compiler,
- (0x807dbc54_b67e_4c79, 0x8afb_eafe4df6f2e1))
-
using Core.Intrinsics, Core.IR
using Core: ABIOverride, Builtin, CodeInstance, IntrinsicFunction, MethodInstance, MethodMatch,
@@ -49,7 +45,8 @@ using Core: ABIOverride, Builtin, CodeInstance, IntrinsicFunction, MethodInstanc
using Base
using Base: @_foldable_meta, @_gc_preserve_begin, @_gc_preserve_end, @nospecializeinfer,
- BINDING_KIND_GLOBAL, BINDING_KIND_UNDEF_CONST, BINDING_KIND_BACKDATED_CONST,
+ PARTITION_KIND_GLOBAL, PARTITION_KIND_UNDEF_CONST, PARTITION_KIND_BACKDATED_CONST, PARTITION_KIND_DECLARED,
+ PARTITION_FLAG_DEPWARN,
Base, BitVector, Bottom, Callable, DataTypeFieldDesc,
EffectsOverride, Filter, Generator, IteratorSize, JLOptions, NUM_EFFECTS_OVERRIDES,
OneTo, Ordering, RefValue, SizeUnknown, _NAMEDTUPLE_NAME,
@@ -60,7 +57,7 @@ using Base: @_foldable_meta, @_gc_preserve_begin, @_gc_preserve_end, @nospeciali
generating_output, get_nospecializeinfer_sig, get_world_counter, has_free_typevars,
hasgenerator, hasintersect, indexed_iterate, isType, is_file_tracked, is_function_def,
is_meta_expr, is_meta_expr_head, is_nospecialized, is_nospecializeinfer, is_defined_const_binding,
- is_some_const_binding, is_some_guard, is_some_imported, is_valid_intrinsic_elptr,
+ is_some_const_binding, is_some_guard, is_some_imported, is_some_explicit_imported, is_some_binding_imported, is_valid_intrinsic_elptr,
isbitsunion, isconcretedispatch, isdispatchelem, isexpr, isfieldatomic, isidentityfree,
iskindtype, ismutabletypename, ismutationfree, issingletontype, isvarargtype, isvatuple,
kwerr, lookup_binding_partition, may_invoke_generator, methods, midpoint, moduleroot,
@@ -72,7 +69,11 @@ using Base.Order
import Base: ==, _topmod, append!, convert, copy, copy!, findall, first, get, get!,
getindex, haskey, in, isempty, isready, iterate, iterate, last, length, max_world,
- min_world, popfirst!, push!, resize!, setindex!, size
+ min_world, popfirst!, push!, resize!, setindex!, size, intersect
+
+# Needs to match UUID defined in Project.toml
+ccall(:jl_set_module_uuid, Cvoid, (Any, NTuple{2, UInt64}), Compiler,
+ (0x807dbc54_b67e_4c79, 0x8afb_eafe4df6f2e1))
const getproperty = Core.getfield
const setproperty! = Core.setfield!
@@ -129,7 +130,7 @@ something(x::Any, y...) = x
############
baremodule BuildSettings
-using Core: ARGS, include
+using Core: ARGS, include, Int, ===
using ..Compiler: >, getindex, length
global MAX_METHODS::Int = 3
@@ -188,21 +189,25 @@ macro __SOURCE_FILE__()
return QuoteNode(__source__.file::Symbol)
end
-module IRShow end
-function load_irshow!()
- if isdefined(Base, :end_base_include)
- # This code path is exclusively for Revise, which may want to re-run this
- # after bootstrap.
- include(IRShow, Base.joinpath(Base.dirname(Base.String(@__SOURCE_FILE__)), "ssair/show.jl"))
- else
+module IRShow end # relies on string and IO operations defined in Base
+baremodule TrimVerifier using Core end # relies on IRShow, so define this afterwards
+
+if isdefined(Base, :end_base_include)
+ # When this module is loaded as the standard library, include these files as usual
+ include(IRShow, "ssair/show.jl")
+ include(TrimVerifier, "verifytrim.jl")
+else
+ function load_irshow!()
+ Base.delete_method(Base.which(verify_typeinf_trim, (IO, Vector{Any}, Bool)),)
include(IRShow, "ssair/show.jl")
+ include(TrimVerifier, "verifytrim.jl")
end
-end
-if !isdefined(Base, :end_base_include)
- # During bootstrap, skip including this file and defer it to base/show.jl to include later
-else
- # When this module is loaded as the standard library, include this file as usual
- load_irshow!()
+ function verify_typeinf_trim(io::IO, codeinfos::Vector{Any}, onlywarn::Bool)
+ # stub implementation
+ msg = "--trim verifier not defined"
+ onlywarn ? println(io, msg) : error(msg)
+ end
+ # During bootstrap, skip including these files and defer to base/show.jl to include it later
end
end # baremodule Compiler
diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl
index fe7cc1dbf5680..9bdd5b50e512e 100644
--- a/Compiler/src/abstractinterpretation.jl
+++ b/Compiler/src/abstractinterpretation.jl
@@ -286,19 +286,12 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(fun
state.rettype = Any
end
# if from_interprocedural added any pclimitations to the set inherited from the arguments,
- # some of those may be part of our cycles, so those can be deleted now
- # TODO: and those might need to be deleted later too if the cycle grows to include them?
if isa(sv, InferenceState)
# TODO (#48913) implement a proper recursion handling for irinterp:
- # This works just because currently the `:terminate` condition guarantees that
- # irinterp doesn't fail into unresolved cycles, but it's not a good solution.
+ # This works most of the time just because currently the `:terminate` condition often guarantees that
+ # irinterp doesn't fail into unresolved cycles, but it is not a good (or working) solution.
# We should revisit this once we have a better story for handling cycles in irinterp.
- if !isempty(sv.pclimitations) # remove self, if present
- delete!(sv.pclimitations, sv)
- for caller in callers_in_cycle(sv)
- delete!(sv.pclimitations, caller)
- end
- end
+ delete!(sv.pclimitations, sv) # remove self, if present
end
else
# there is unanalyzed candidate, widen type and effects to the top
@@ -775,7 +768,7 @@ function edge_matches_sv(interp::AbstractInterpreter, frame::AbsIntState,
# check in the cycle list first
# all items in here are considered mutual parents of all others
if !any(p::AbsIntState->matches_sv(p, sv), callers_in_cycle(frame))
- let parent = frame_parent(frame)
+ let parent = cycle_parent(frame)
parent === nothing && return false
(is_cached(parent) || frame_parent(parent) !== nothing) || return false
matches_sv(parent, sv) || return false
@@ -785,7 +778,7 @@ function edge_matches_sv(interp::AbstractInterpreter, frame::AbsIntState,
# If the method defines a recursion relation, give it a chance
# to tell us that this recursion is actually ok.
if isdefined(method, :recursion_relation)
- if Core._apply_pure(method.recursion_relation, Any[method, callee_method2, sig, frame_instance(frame).specTypes])
+ if Core._call_in_world_total(get_world_counter(), method.recursion_relation, method, callee_method2, sig, frame_instance(frame).specTypes)
return false
end
end
@@ -1359,6 +1352,8 @@ function const_prop_call(interp::AbstractInterpreter,
end
assign_parentchild!(frame, sv)
if !typeinf(interp, frame)
+ sv.time_caches += frame.time_caches
+ sv.time_paused += frame.time_paused
add_remark!(interp, sv, "[constprop] Fresh constant inference hit a cycle")
@assert frame.frameid != 0 && frame.cycleid == frame.frameid
callstack = frame.callstack::Vector{AbsIntState}
@@ -1379,6 +1374,7 @@ function const_prop_call(interp::AbstractInterpreter,
inf_result.result = concrete_eval_result.rt
inf_result.ipo_effects = concrete_eval_result.effects
end
+ typ = inf_result.result
return const_prop_result(inf_result)
end
@@ -1742,8 +1738,8 @@ function abstract_apply(interp::AbstractInterpreter, argtypes::Vector{Any}, si::
retinfos = ApplyCallInfo[]
retinfo = UnionSplitApplyCallInfo(retinfos)
exctype = Union{}
- ctypes´ = Vector{Any}[]
- infos´ = Vector{MaybeAbstractIterationInfo}[]
+ ctypes´::Vector{Vector{Any}} = Vector{Any}[]
+ infos´::Vector{Vector{MaybeAbstractIterationInfo}} = Vector{MaybeAbstractIterationInfo}[]
local ti, argtypesi
local ctfuture::Future{AbstractIterationResult}
local callfuture::Future{CallMeta}
@@ -2391,7 +2387,7 @@ function abstract_throw_methoderror(interp::AbstractInterpreter, argtypes::Vecto
return Future(CallMeta(Union{}, exct, EFFECTS_THROWS, NoCallInfo()))
end
-const generic_getglobal_effects = Effects(EFFECTS_THROWS, consistent=ALWAYS_FALSE, inaccessiblememonly=ALWAYS_FALSE)
+const generic_getglobal_effects = Effects(EFFECTS_THROWS, effect_free=ALWAYS_FALSE, consistent=ALWAYS_FALSE, inaccessiblememonly=ALWAYS_FALSE) #= effect_free for depwarn =#
const generic_getglobal_exct = Union{ArgumentError, TypeError, ConcurrencyViolationError, UndefVarError}
function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, @nospecialize(M), @nospecialize(s))
⊑ = partialorder(typeinf_lattice(interp))
@@ -2399,8 +2395,8 @@ function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, s
M, s = M.val, s.val
if M isa Module && s isa Symbol
gr = GlobalRef(M, s)
- (ret, bpart) = abstract_eval_globalref(interp, gr, saw_latestworld, sv)
- return CallMeta(ret, bpart === nothing ? NoCallInfo() : GlobalAccessInfo(convert(Core.Binding, gr), bpart))
+ ret = abstract_eval_globalref(interp, gr, saw_latestworld, sv)
+ return CallMeta(ret, GlobalAccessInfo(convert(Core.Binding, gr)))
end
return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo())
elseif !hasintersect(widenconst(M), Module) || !hasintersect(widenconst(s), Symbol)
@@ -2425,11 +2421,15 @@ function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, s
end
function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any})
- if length(argtypes) == 3
- return abstract_eval_getglobal(interp, sv, saw_latestworld, argtypes[2], argtypes[3])
- elseif length(argtypes) == 4
- return abstract_eval_getglobal(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4])
- elseif !isvarargtype(argtypes[end]) || length(argtypes) > 5
+ if !isvarargtype(argtypes[end])
+ if length(argtypes) == 3
+ return abstract_eval_getglobal(interp, sv, saw_latestworld, argtypes[2], argtypes[3])
+ elseif length(argtypes) == 4
+ return abstract_eval_getglobal(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4])
+ else
+ return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
+ end
+ elseif length(argtypes) > 5
return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
else
return CallMeta(Any, generic_getglobal_exct, generic_getglobal_effects, NoCallInfo())
@@ -2437,24 +2437,30 @@ function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, s
end
@nospecs function abstract_eval_get_binding_type(interp::AbstractInterpreter, sv::AbsIntState, M, s)
+ @nospecialize M s
⊑ = partialorder(typeinf_lattice(interp))
if isa(M, Const) && isa(s, Const)
(M, s) = (M.val, s.val)
if !isa(M, Module) || !isa(s, Symbol)
return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo())
end
- partition = abstract_eval_binding_partition!(interp, GlobalRef(M, s), sv)
-
- if is_some_guard(binding_kind(partition))
- # We do not currently assume an invalidation for guard -> defined transitions
- # rt = Const(nothing)
- rt = Type
- elseif is_some_const_binding(binding_kind(partition))
- rt = Const(Any)
- else
- rt = Const(partition_restriction(partition))
+ gr = GlobalRef(M, s)
+ (valid_worlds, rt) = scan_leaf_partitions(interp, gr, sv.world) do interp::AbstractInterpreter, ::Core.Binding, partition::Core.BindingPartition
+ local rt
+ kind = binding_kind(partition)
+ if is_some_guard(kind) || kind == PARTITION_KIND_DECLARED
+ # We do not currently assume an invalidation for guard -> defined transitions
+ # rt = Const(nothing)
+ rt = Type
+ elseif is_some_const_binding(kind)
+ rt = Const(Any)
+ else
+ rt = Const(partition_restriction(partition))
+ end
+ rt
end
- return CallMeta(rt, Union{}, EFFECTS_TOTAL, NoCallInfo())
+ update_valid_age!(sv, valid_worlds)
+ return CallMeta(rt, Union{}, EFFECTS_TOTAL, GlobalAccessInfo(convert(Core.Binding, gr)))
elseif !hasintersect(widenconst(M), Module) || !hasintersect(widenconst(s), Symbol)
return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo())
elseif M ⊑ Module && s ⊑ Symbol
@@ -2464,12 +2470,17 @@ end
end
function abstract_eval_get_binding_type(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any})
- if length(argtypes) == 3
- return abstract_eval_get_binding_type(interp, sv, argtypes[2], argtypes[3])
- elseif !isvarargtype(argtypes[end]) || length(argtypes) > 4
+ if !isvarargtype(argtypes[end])
+ if length(argtypes) == 3
+ return abstract_eval_get_binding_type(interp, sv, argtypes[2], argtypes[3])
+ else
+ return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
+ end
+ elseif length(argtypes) > 4
return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
+ else
+ return CallMeta(Type, Union{TypeError, ArgumentError}, EFFECTS_THROWS, NoCallInfo())
end
- return CallMeta(Type, Union{TypeError, ArgumentError}, EFFECTS_THROWS, NoCallInfo())
end
const setglobal!_effects = Effects(EFFECTS_TOTAL; effect_free=ALWAYS_FALSE, nothrow=false, inaccessiblememonly=ALWAYS_FALSE)
@@ -2479,8 +2490,8 @@ function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState,
M, s = M.val, s.val
if M isa Module && s isa Symbol
gr = GlobalRef(M, s)
- (rt, exct), partition = global_assignment_rt_exct(interp, sv, saw_latestworld, gr, v)
- return CallMeta(rt, exct, Effects(setglobal!_effects, nothrow=exct===Bottom), GlobalAccessInfo(convert(Core.Binding, gr), partition))
+ (rt, exct) = global_assignment_rt_exct(interp, sv, saw_latestworld, gr, v)
+ return CallMeta(rt, exct, Effects(setglobal!_effects, nothrow=exct===Bottom), GlobalAccessInfo(convert(Core.Binding, gr)))
end
return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo())
end
@@ -2502,11 +2513,15 @@ end
const generic_setglobal!_exct = Union{ArgumentError, TypeError, ErrorException, ConcurrencyViolationError}
function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any})
- if length(argtypes) == 4
- return abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4])
- elseif length(argtypes) == 5
- return abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4], argtypes[5])
- elseif !isvarargtype(argtypes[end]) || length(argtypes) > 6
+ if !isvarargtype(argtypes[end])
+ if length(argtypes) == 4
+ return abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4])
+ elseif length(argtypes) == 5
+ return abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4], argtypes[5])
+ else
+ return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
+ end
+ elseif length(argtypes) > 6
return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
else
return CallMeta(Any, generic_setglobal!_exct, setglobal!_effects, NoCallInfo())
@@ -2530,11 +2545,15 @@ function abstract_eval_swapglobal!(interp::AbstractInterpreter, sv::AbsIntState,
end
function abstract_eval_swapglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any})
- if length(argtypes) == 4
- return abstract_eval_swapglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4])
- elseif length(argtypes) == 5
- return abstract_eval_swapglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4], argtypes[5])
- elseif !isvarargtype(argtypes[end]) || length(argtypes) > 6
+ if !isvarargtype(argtypes[end])
+ if length(argtypes) == 4
+ return abstract_eval_swapglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4])
+ elseif length(argtypes) == 5
+ return abstract_eval_swapglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4], argtypes[5])
+ else
+ return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
+ end
+ elseif length(argtypes) > 6
return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
else
return CallMeta(Any, Union{generic_getglobal_exct,generic_setglobal!_exct}, setglobal!_effects, NoCallInfo())
@@ -2542,18 +2561,22 @@ function abstract_eval_swapglobal!(interp::AbstractInterpreter, sv::AbsIntState,
end
function abstract_eval_setglobalonce!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any})
- if length(argtypes) in (4, 5, 6)
- cm = abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4])
- if length(argtypes) >= 5
- goe = global_order_exct(argtypes[5], #=loading=#true, #=storing=#true)
- cm = merge_exct(cm, goe)
- end
- if length(argtypes) == 6
- goe = global_order_exct(argtypes[6], #=loading=#true, #=storing=#false)
- cm = merge_exct(cm, goe)
- end
- return CallMeta(Bool, cm.exct, cm.effects, cm.info)
- elseif !isvarargtype(argtypes[end]) || length(argtypes) > 6
+ if !isvarargtype(argtypes[end])
+ if length(argtypes) in (4, 5, 6)
+ cm = abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4])
+ if length(argtypes) >= 5
+ goe = global_order_exct(argtypes[5], #=loading=#true, #=storing=#true)
+ cm = merge_exct(cm, goe)
+ end
+ if length(argtypes) == 6
+ goe = global_order_exct(argtypes[6], #=loading=#true, #=storing=#false)
+ cm = merge_exct(cm, goe)
+ end
+ return CallMeta(Bool, cm.exct, cm.effects, cm.info)
+ else
+ return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
+ end
+ elseif length(argtypes) > 7
return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
else
return CallMeta(Bool, generic_setglobal!_exct, setglobal!_effects, NoCallInfo())
@@ -2561,38 +2584,48 @@ function abstract_eval_setglobalonce!(interp::AbstractInterpreter, sv::AbsIntSta
end
function abstract_eval_replaceglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any})
- if length(argtypes) in (5, 6, 7)
- (M, s, x, v) = argtypes[2], argtypes[3], argtypes[4], argtypes[5]
- T = nothing
- if isa(M, Const) && isa(s, Const)
- M, s = M.val, s.val
- M isa Module || return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo())
- s isa Symbol || return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo())
- gr = GlobalRef(M, s)
- partition = abstract_eval_binding_partition!(interp, gr, sv)
- rte = abstract_eval_partition_load(interp, partition)
- if binding_kind(partition) == BINDING_KIND_GLOBAL
- T = partition_restriction(partition)
+ if !isvarargtype(argtypes[end])
+ if length(argtypes) in (5, 6, 7)
+ (M, s, x, v) = argtypes[2], argtypes[3], argtypes[4], argtypes[5]
+ T = nothing
+ if isa(M, Const) && isa(s, Const)
+ M, s = M.val, s.val
+ M isa Module || return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo())
+ s isa Symbol || return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo())
+ gr = GlobalRef(M, s)
+ v′ = RefValue{Any}(v)
+ (valid_worlds, (rte, T)) = scan_leaf_partitions(interp, gr, sv.world) do interp::AbstractInterpreter, binding::Core.Binding, partition::Core.BindingPartition
+ partition_T = nothing
+ partition_rte = abstract_eval_partition_load(interp, binding, partition)
+ if binding_kind(partition) == PARTITION_KIND_GLOBAL
+ partition_T = partition_restriction(partition)
+ end
+ partition_exct = Union{partition_rte.exct, global_assignment_binding_rt_exct(interp, partition, v′[])[2]}
+ partition_rte = RTEffects(partition_rte.rt, partition_exct, partition_rte.effects)
+ Pair{RTEffects, Any}(partition_rte, partition_T)
+ end
+ update_valid_age!(sv, valid_worlds)
+ effects = merge_effects(rte.effects, Effects(setglobal!_effects, nothrow=rte.exct===Bottom))
+ sg = CallMeta(Any, rte.exct, effects, GlobalAccessInfo(convert(Core.Binding, gr)))
+ else
+ sg = abstract_eval_setglobal!(interp, sv, saw_latestworld, M, s, v)
end
- exct = Union{rte.exct, global_assignment_binding_rt_exct(interp, partition, v)[2]}
- effects = merge_effects(rte.effects, Effects(setglobal!_effects, nothrow=exct===Bottom))
- sg = CallMeta(Any, exct, effects, GlobalAccessInfo(convert(Core.Binding, gr), partition))
+ if length(argtypes) >= 6
+ goe = global_order_exct(argtypes[6], #=loading=#true, #=storing=#true)
+ sg = merge_exct(sg, goe)
+ end
+ if length(argtypes) == 7
+ goe = global_order_exct(argtypes[7], #=loading=#true, #=storing=#false)
+ sg = merge_exct(sg, goe)
+ end
+ rt = T === nothing ?
+ ccall(:jl_apply_cmpswap_type, Any, (Any,), S) where S :
+ ccall(:jl_apply_cmpswap_type, Any, (Any,), T)
+ return CallMeta(rt, sg.exct, sg.effects, sg.info)
else
- sg = abstract_eval_setglobal!(interp, sv, saw_latestworld, M, s, v)
- end
- if length(argtypes) >= 6
- goe = global_order_exct(argtypes[6], #=loading=#true, #=storing=#true)
- sg = merge_exct(sg, goe)
+ return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
end
- if length(argtypes) == 7
- goe = global_order_exct(argtypes[7], #=loading=#true, #=storing=#false)
- sg = merge_exct(sg, goe)
- end
- rt = T === nothing ?
- ccall(:jl_apply_cmpswap_type, Any, (Any,), S) where S :
- ccall(:jl_apply_cmpswap_type, Any, (Any,), T)
- return CallMeta(rt, sg.exct, sg.effects, sg.info)
- elseif !isvarargtype(argtypes[end]) || length(argtypes) > 8
+ elseif length(argtypes) > 8
return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
else
return CallMeta(Any, Union{generic_getglobal_exct,generic_setglobal!_exct}, setglobal!_effects, NoCallInfo())
@@ -2647,11 +2680,8 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f),
return Future(abstract_eval_isdefinedglobal(interp, argtypes[2], argtypes[3], Const(true),
length(argtypes) == 4 ? argtypes[4] : Const(:unordered),
si.saw_latestworld, sv))
- elseif f === Core.isdefinedglobal && 3 <= length(argtypes) <= 5
- return Future(abstract_eval_isdefinedglobal(interp, argtypes[2], argtypes[3],
- length(argtypes) >= 4 ? argtypes[4] : Const(true),
- length(argtypes) >= 5 ? argtypes[5] : Const(:unordered),
- si.saw_latestworld, sv))
+ elseif f === Core.isdefinedglobal
+ return Future(abstract_eval_isdefinedglobal(interp, sv, si.saw_latestworld, argtypes))
elseif f === Core.get_binding_type
return Future(abstract_eval_get_binding_type(interp, sv, argtypes))
end
@@ -2947,7 +2977,7 @@ function abstract_eval_special_value(interp::AbstractInterpreter, @nospecialize(
end
elseif isa(e, GlobalRef)
# No need for an edge since an explicit GlobalRef will be picked up by the source scan
- return abstract_eval_globalref(interp, e, sstate.saw_latestworld, sv)[1]
+ return abstract_eval_globalref(interp, e, sstate.saw_latestworld, sv)
end
if isa(e, QuoteNode)
e = e.value
@@ -3232,25 +3262,29 @@ function abstract_eval_isdefinedglobal(interp::AbstractInterpreter, mod::Module,
effects = EFFECTS_TOTAL
gr = GlobalRef(mod, sym)
- partition = lookup_binding_partition!(interp, gr, sv)
- if allow_import !== true && is_some_imported(binding_kind(partition))
- if allow_import === false
- rt = Const(false)
- else
- effects = Effects(generic_isdefinedglobal_effects, nothrow=true)
+ if allow_import !== true
+ gr = GlobalRef(mod, sym)
+ partition = lookup_binding_partition!(interp, gr, sv)
+ if allow_import !== true && is_some_binding_imported(binding_kind(partition))
+ if allow_import === false
+ rt = Const(false)
+ else
+ effects = Effects(generic_isdefinedglobal_effects, nothrow=true)
+ end
+ @goto done
end
+ end
+
+ (valid_worlds, rte) = abstract_load_all_consistent_leaf_partitions(interp, gr, sv.world)
+ if rte.exct == Union{}
+ rt = Const(true)
+ elseif rte.rt === Union{} && rte.exct === UndefVarError
+ rt = Const(false)
else
- partition = walk_binding_partition!(interp, partition, sv)
- rte = abstract_eval_partition_load(interp, partition)
- if rte.exct == Union{}
- rt = Const(true)
- elseif rte.rt === Union{} && rte.exct === UndefVarError
- rt = Const(false)
- else
- effects = Effects(generic_isdefinedglobal_effects, nothrow=true)
- end
+ effects = Effects(generic_isdefinedglobal_effects, nothrow=true)
end
- return CallMeta(RTEffects(rt, Union{}, effects), GlobalAccessInfo(convert(Core.Binding, gr), partition))
+@label done
+ return CallMeta(RTEffects(rt, Union{}, effects), GlobalAccessInfo(convert(Core.Binding, gr)))
end
function abstract_eval_isdefinedglobal(interp::AbstractInterpreter, @nospecialize(M), @nospecialize(s), @nospecialize(allow_import_arg), @nospecialize(order_arg), saw_latestworld::Bool, sv::AbsIntState)
@@ -3272,6 +3306,7 @@ function abstract_eval_isdefinedglobal(interp::AbstractInterpreter, @nospecializ
exct = Union{exct, ConcurrencyViolationError}
end
end
+ ⊑ = partialorder(typeinf_lattice(interp))
if M isa Const && s isa Const
M, s = M.val, s.val
if M isa Module && s isa Symbol
@@ -3286,6 +3321,23 @@ function abstract_eval_isdefinedglobal(interp::AbstractInterpreter, @nospecializ
return CallMeta(Bool, Union{exct, TypeError, UndefVarError}, generic_isdefinedglobal_effects, NoCallInfo())
end
+function abstract_eval_isdefinedglobal(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any})
+ if !isvarargtype(argtypes[end])
+ if 3 <= length(argtypes) <= 5
+ return abstract_eval_isdefinedglobal(interp, argtypes[2], argtypes[3],
+ length(argtypes) >= 4 ? argtypes[4] : Const(true),
+ length(argtypes) >= 5 ? argtypes[5] : Const(:unordered),
+ saw_latestworld, sv)
+ else
+ return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
+ end
+ elseif length(argtypes) > 6
+ return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo())
+ else
+ return CallMeta(Bool, Union{ConcurrencyViolationError, TypeError, UndefVarError}, generic_isdefinedglobal_effects, NoCallInfo())
+ end
+end
+
function abstract_eval_throw_undef_if_not(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState)
condt = abstract_eval_value(interp, e.args[2], sstate, sv)
condval = maybe_extract_const_bool(condt)
@@ -3409,7 +3461,7 @@ function abstract_eval_foreigncall(interp::AbstractInterpreter, e::Expr, sstate:
abstract_eval_value(interp, x, sstate, sv)
end
cconv = e.args[5]
- if isa(cconv, QuoteNode) && (v = cconv.value; isa(v, Tuple{Symbol, UInt16}))
+ if isa(cconv, QuoteNode) && (v = cconv.value; isa(v, Tuple{Symbol, UInt16, Bool}))
override = decode_effects_override(v[2])
effects = override_effects(effects, override)
end
@@ -3439,6 +3491,20 @@ function merge_override_effects!(interp::AbstractInterpreter, effects::Effects,
# It is possible for arguments (GlobalRef/:static_parameter) to throw,
# but these will be recomputed during SSA construction later.
override = decode_statement_effects_override(sv)
+ if override.consistent
+ m = sv.linfo.def
+ if isa(m, Method)
+ # N.B.: We'd like deleted_world here, but we can't add an appropriate edge at this point.
+ # However, in order to reach here in the first place, ordinary method lookup would have
+ # had to add an edge and appropriate invalidation trigger.
+ valid_worlds = WorldRange(m.primary_world, typemax(Int))
+ if sv.world.this in valid_worlds
+ update_valid_age!(sv, valid_worlds)
+ else
+ override = EffectsOverride(override, consistent=false)
+ end
+ end
+ end
effects = override_effects(effects, override)
set_curr_ssaflag!(sv, flags_for_effects(effects), IR_FLAGS_EFFECTS)
merge_effects!(interp, sv, effects)
@@ -3464,129 +3530,181 @@ world_range(ci::CodeInfo) = WorldRange(ci.min_world, ci.max_world)
world_range(ci::CodeInstance) = WorldRange(ci.min_world, ci.max_world)
world_range(compact::IncrementalCompact) = world_range(compact.ir)
-function force_binding_resolution!(g::GlobalRef, world::UInt)
- # Force resolution of the binding
- # TODO: This will go away once we switch over to fully partitioned semantics
- ccall(:jl_force_binding_resolution, Cvoid, (Any, Csize_t), g, world)
- return nothing
-end
-
-function abstract_eval_globalref_type(g::GlobalRef, src::Union{CodeInfo, IRCode, IncrementalCompact}, retry_after_resolve::Bool=true)
+function abstract_eval_globalref_type(g::GlobalRef, src::Union{CodeInfo, IRCode, IncrementalCompact})
worlds = world_range(src)
partition = lookup_binding_partition(min_world(worlds), g)
- partition.max_world < max_world(worlds) && return Any
- while is_some_imported(binding_kind(partition))
- imported_binding = partition_restriction(partition)::Core.Binding
- partition = lookup_binding_partition(min_world(worlds), imported_binding)
- partition.max_world < max_world(worlds) && return Any
- end
- if is_some_guard(binding_kind(partition))
- if retry_after_resolve
- # This method is surprisingly hot. For performance, don't ask the runtime to resolve
- # the binding unless necessary - doing so triggers an additional lookup, which though
- # not super expensive is hot enough to show up in benchmarks.
- force_binding_resolution!(g, min_world(worlds))
- return abstract_eval_globalref_type(g, src, false)
- end
- # return Union{}
+
+ (valid_worlds, rte) = abstract_load_all_consistent_leaf_partitions(nothing, g, WorldWithRange(min_world(worlds), worlds))
+ if min_world(valid_worlds) > min_world(worlds) || max_world(valid_worlds) < max_world(worlds)
return Any
end
- if is_some_const_binding(binding_kind(partition))
- return Const(partition_restriction(partition))
- end
- return partition_restriction(partition)
+
+ return rte.rt
end
-function lookup_binding_partition!(interp::AbstractInterpreter, g::GlobalRef, sv::AbsIntState)
- force_binding_resolution!(g, get_inference_world(interp))
+function lookup_binding_partition!(interp::AbstractInterpreter, g::Union{GlobalRef, Core.Binding}, sv::AbsIntState)
partition = lookup_binding_partition(get_inference_world(interp), g)
update_valid_age!(sv, WorldRange(partition.min_world, partition.max_world))
partition
end
-function walk_binding_partition!(interp::AbstractInterpreter, partition::Core.BindingPartition, sv::AbsIntState)
- while is_some_imported(binding_kind(partition))
+function walk_binding_partition(imported_binding::Core.Binding, partition::Core.BindingPartition, world::UInt)
+ valid_worlds = WorldRange(partition.min_world, partition.max_world)
+ while is_some_binding_imported(binding_kind(partition))
imported_binding = partition_restriction(partition)::Core.Binding
- partition = lookup_binding_partition(get_inference_world(interp), imported_binding)
- update_valid_age!(sv, WorldRange(partition.min_world, partition.max_world))
+ partition = lookup_binding_partition(world, imported_binding)
+ valid_worlds = intersect(valid_worlds, WorldRange(partition.min_world, partition.max_world))
end
- return partition
+ return Pair{WorldRange, Pair{Core.Binding, Core.BindingPartition}}(valid_worlds, imported_binding=>partition)
end
function abstract_eval_binding_partition!(interp::AbstractInterpreter, g::GlobalRef, sv::AbsIntState)
- partition = lookup_binding_partition!(interp, g, sv)
- partition = walk_binding_partition!(interp, partition, sv)
+ b = convert(Core.Binding, g)
+ partition = lookup_binding_partition!(interp, b, sv)
+ valid_worlds, (_, partition) = walk_binding_partition(b, partition, get_inference_world(interp))
+ update_valid_age!(sv, valid_worlds)
return partition
end
-function abstract_eval_partition_load(interp::AbstractInterpreter, partition::Core.BindingPartition)
+function abstract_eval_partition_load(interp::Union{AbstractInterpreter,Nothing}, binding::Core.Binding, partition::Core.BindingPartition)
kind = binding_kind(partition)
- if is_some_guard(kind) || kind == BINDING_KIND_UNDEF_CONST
- if InferenceParams(interp).assume_bindings_static
+ isdepwarn = (partition.kind & PARTITION_FLAG_DEPWARN) != 0
+ local_getglobal_effects = Effects(generic_getglobal_effects, effect_free=isdepwarn ? ALWAYS_FALSE : ALWAYS_TRUE)
+ if is_some_guard(kind)
+ if interp !== nothing && InferenceParams(interp).assume_bindings_static
return RTEffects(Union{}, UndefVarError, EFFECTS_THROWS)
else
# We do not currently assume an invalidation for guard -> defined transitions
# return RTEffects(Union{}, UndefVarError, EFFECTS_THROWS)
- return RTEffects(Any, UndefVarError, generic_getglobal_effects)
+ return RTEffects(Any, UndefVarError, local_getglobal_effects)
end
end
if is_defined_const_binding(kind)
- if kind == BINDING_KIND_BACKDATED_CONST
+ if kind == PARTITION_KIND_BACKDATED_CONST
# Infer this as guard. We do not want a later const definition to retroactively improve
# inference results in an earlier world.
- return RTEffects(Any, UndefVarError, generic_getglobal_effects)
+ return RTEffects(Any, UndefVarError, local_getglobal_effects)
end
rt = Const(partition_restriction(partition))
- return RTEffects(rt, Union{}, Effects(EFFECTS_TOTAL, inaccessiblememonly=is_mutation_free_argtype(rt) ? ALWAYS_TRUE : ALWAYS_FALSE))
+ return RTEffects(rt, Union{}, Effects(EFFECTS_TOTAL,
+ inaccessiblememonly=is_mutation_free_argtype(rt) ? ALWAYS_TRUE : ALWAYS_FALSE,
+ effect_free=isdepwarn ? ALWAYS_FALSE : ALWAYS_TRUE))
end
- rt = partition_restriction(partition)
- return RTEffects(rt, UndefVarError, generic_getglobal_effects)
+ if kind == PARTITION_KIND_DECLARED
+ # Could be replaced by a backdated const which has an effect, so we can't assume it won't.
+ # Besides, we would prefer not to merge the world range for this into the world range for
+ # _GLOBAL, because that would pessimize codegen.
+ effects = Effects(local_getglobal_effects, effect_free=ALWAYS_FALSE)
+ rt = Any
+ else
+ rt = partition_restriction(partition)
+ effects = local_getglobal_effects
+ end
+ if (interp !== nothing && InferenceParams(interp).assume_bindings_static &&
+ kind in (PARTITION_KIND_GLOBAL, PARTITION_KIND_DECLARED) &&
+ isdefined(binding, :value))
+ exct = Union{}
+ effects = Effects(generic_getglobal_effects; nothrow=true)
+ else
+ # We do not assume in general that assigned global bindings remain assigned.
+ # The existence of pkgimages allows them to revert in practice.
+ exct = UndefVarError
+ end
+ return RTEffects(rt, exct, effects)
+end
+
+function scan_specified_partitions(query::F1, walk_binding_partition::F2,
+ interp::Union{AbstractInterpreter,Nothing}, g::GlobalRef, wwr::WorldWithRange) where {F1,F2}
+ local total_validity, rte, binding_partition
+ binding = convert(Core.Binding, g)
+ lookup_world = max_world(wwr.valid_worlds)
+ while true
+ # Partitions are ordered newest-to-oldest so start at the top
+ binding_partition = @isdefined(binding_partition) ?
+ lookup_binding_partition(lookup_world, binding, binding_partition) :
+ lookup_binding_partition(lookup_world, binding)
+ while lookup_world >= binding_partition.min_world && (!@isdefined(total_validity) || min_world(total_validity) > min_world(wwr.valid_worlds))
+ partition_validity, (leaf_binding, leaf_partition) = walk_binding_partition(binding, binding_partition, lookup_world)
+ @assert lookup_world in partition_validity
+ this_rte = query(interp, leaf_binding, leaf_partition)
+ if @isdefined(rte)
+ if this_rte === rte
+ total_validity = union(total_validity, partition_validity)
+ lookup_world = min_world(total_validity) - 1
+ continue
+ end
+ if min_world(total_validity) <= wwr.this
+ @goto out
+ end
+ end
+ total_validity = partition_validity
+ lookup_world = min_world(total_validity) - 1
+ rte = this_rte
+ end
+ min_world(total_validity) > min_world(wwr.valid_worlds) || break
+ end
+@label out
+ return Pair{WorldRange, typeof(rte)}(total_validity, rte)
end
+scan_leaf_partitions(query::F, ::Nothing, g::GlobalRef, wwr::WorldWithRange) where F =
+ scan_specified_partitions(query, walk_binding_partition, nothing, g, wwr)
+scan_leaf_partitions(query::F, interp::AbstractInterpreter, g::GlobalRef, wwr::WorldWithRange) where F =
+ scan_specified_partitions(query, walk_binding_partition, interp, g, wwr)
+
+function scan_partitions(query::F, interp::AbstractInterpreter, g::GlobalRef, wwr::WorldWithRange) where F
+ walk_binding_partition = function (b::Core.Binding, partition::Core.BindingPartition, world::UInt)
+ Pair{WorldRange, Pair{Core.Binding, Core.BindingPartition}}(
+ WorldRange(partition.min_world, partition.max_world), b=>partition)
+ end
+ return scan_specified_partitions(query, walk_binding_partition, interp, g, wwr)
+end
+
+abstract_load_all_consistent_leaf_partitions(interp::AbstractInterpreter, g::GlobalRef, wwr::WorldWithRange) =
+ scan_leaf_partitions(abstract_eval_partition_load, interp, g, wwr)
+abstract_load_all_consistent_leaf_partitions(::Nothing, g::GlobalRef, wwr::WorldWithRange) =
+ scan_leaf_partitions(abstract_eval_partition_load, nothing, g, wwr)
+
function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, saw_latestworld::Bool, sv::AbsIntState)
if saw_latestworld
- return Pair{RTEffects, Union{Nothing, Core.BindingPartition}}(RTEffects(Any, Any, generic_getglobal_effects), nothing)
- end
- partition = abstract_eval_binding_partition!(interp, g, sv)
- ret = abstract_eval_partition_load(interp, partition)
- if ret.rt !== Union{} && ret.exct === UndefVarError && InferenceParams(interp).assume_bindings_static
- b = convert(Core.Binding, g)
- if isdefined(b, :value)
- ret = RTEffects(ret.rt, Union{}, Effects(generic_getglobal_effects, nothrow=true))
- end
- # We do not assume in general that assigned global bindings remain assigned.
- # The existence of pkgimages allows them to revert in practice.
+ return RTEffects(Any, Any, generic_getglobal_effects)
end
- return Pair{RTEffects, Union{Nothing, Core.BindingPartition}}(ret, partition)
+ # For inference purposes, we don't particularly care which global binding we end up loading, we only
+ # care about its type. However, we would still like to terminate the world range for the particular
+ # binding we end up reaching such that codegen can emit a simpler pointer load.
+ (valid_worlds, ret) = scan_leaf_partitions(abstract_eval_partition_load, interp, g, sv.world)
+ update_valid_age!(sv, valid_worlds)
+ return ret
end
function global_assignment_rt_exct(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, g::GlobalRef, @nospecialize(newty))
if saw_latestworld
- return Pair{Pair{Any,Any}, Union{Core.BindingPartition, Nothing}}(
- Pair{Any,Any}(newty, Union{ErrorException, TypeError}), nothing)
+ return Pair{Any,Any}(newty, ErrorException)
+ end
+ newty′ = RefValue{Any}(newty)
+ (valid_worlds, ret) = scan_partitions(interp, g, sv.world) do interp::AbstractInterpreter, ::Core.Binding, partition::Core.BindingPartition
+ global_assignment_binding_rt_exct(interp, partition, newty′[])
end
- partition = abstract_eval_binding_partition!(interp, g, sv)
- return Pair{Pair{Any,Any}, Union{Core.BindingPartition, Nothing}}(
- global_assignment_binding_rt_exct(interp, partition, newty),
- partition)
+ update_valid_age!(sv, valid_worlds)
+ return ret
end
function global_assignment_binding_rt_exct(interp::AbstractInterpreter, partition::Core.BindingPartition, @nospecialize(newty))
kind = binding_kind(partition)
if is_some_guard(kind)
return Pair{Any,Any}(newty, ErrorException)
- elseif is_some_const_binding(kind)
+ elseif is_some_const_binding(kind) || is_some_imported(kind)
return Pair{Any,Any}(Bottom, ErrorException)
end
- ty = partition_restriction(partition)
+ ty = kind == PARTITION_KIND_DECLARED ? Any : partition_restriction(partition)
wnewty = widenconst(newty)
if !hasintersect(wnewty, ty)
- return Pair{Any,Any}(Bottom, TypeError)
+ return Pair{Any,Any}(Bottom, ErrorException)
elseif !(wnewty <: ty)
retty = tmeet(typeinf_lattice(interp), newty, ty)
- return Pair{Any,Any}(retty, TypeError)
+ return Pair{Any,Any}(retty, ErrorException)
end
return Pair{Any,Any}(newty, Bottom)
end
@@ -3602,6 +3720,108 @@ function abstract_eval_ssavalue(s::SSAValue, ssavaluetypes::Vector{Any})
return typ
end
+struct AbstractEvalBasicStatementResult
+ rt
+ exct
+ effects::Union{Nothing,Effects}
+ changes::Union{Nothing,StateUpdate}
+ refinements # ::Union{Nothing,SlotRefinement,Vector{Any}}
+ currsaw_latestworld::Bool
+ function AbstractEvalBasicStatementResult(rt, exct, effects::Union{Nothing,Effects},
+ changes::Union{Nothing,StateUpdate}, refinements, currsaw_latestworld::Bool)
+ @nospecialize rt exct refinements
+ return new(rt, exct, effects, changes, refinements, currsaw_latestworld)
+ end
+end
+
+@inline function abstract_eval_basic_statement(
+ interp::AbstractInterpreter, @nospecialize(stmt), sstate::StatementState, frame::InferenceState,
+ result::Union{Nothing,Future{RTEffects}}=nothing)
+ rt = nothing
+ exct = Bottom
+ changes = nothing
+ refinements = nothing
+ effects = nothing
+ currsaw_latestworld = sstate.saw_latestworld
+ if result !== nothing
+ @goto injectresult
+ end
+ if isa(stmt, NewvarNode)
+ changes = StateUpdate(stmt.slot, VarState(Bottom, true))
+ elseif isa(stmt, PhiNode)
+ add_curr_ssaflag!(frame, IR_FLAGS_REMOVABLE)
+ # Implement convergence for PhiNodes. In particular, PhiNodes need to tmerge over
+ # the incoming values from all iterations, but `abstract_eval_phi` will only tmerge
+ # over the first and last iterations. By tmerging in the current old_rt, we ensure that
+ # we will not lose an intermediate value.
+ rt = abstract_eval_phi(interp, stmt, sstate, frame)
+ old_rt = frame.ssavaluetypes[frame.currpc]
+ rt = old_rt === NOT_FOUND ? rt : tmerge(typeinf_lattice(interp), old_rt, rt)
+ else
+ lhs = nothing
+ if isexpr(stmt, :(=))
+ lhs = stmt.args[1]
+ stmt = stmt.args[2]
+ end
+ if !isa(stmt, Expr)
+ (; rt, exct, effects, refinements) = abstract_eval_special_value(interp, stmt, sstate, frame)
+ else
+ hd = stmt.head
+ if hd === :method
+ fname = stmt.args[1]
+ if isa(fname, SlotNumber)
+ changes = StateUpdate(fname, VarState(Any, false))
+ end
+ elseif (hd === :code_coverage_effect ||
+ # :boundscheck can be narrowed to Bool
+ (hd !== :boundscheck && is_meta_expr(stmt)))
+ rt = Nothing
+ elseif hd === :latestworld
+ currsaw_latestworld = true
+ rt = Nothing
+ else
+ result = abstract_eval_statement_expr(interp, stmt, sstate, frame)::Future{RTEffects}
+ if !isready(result) || !isempty(frame.tasks)
+ return result
+
+ @label injectresult
+ # reload local variables
+ lhs = nothing
+ if isexpr(stmt, :(=))
+ lhs = stmt.args[1]
+ stmt = stmt.args[2]
+ end
+ end
+ result = result[]
+ (; rt, exct, effects, refinements) = result
+ if effects.noub === NOUB_IF_NOINBOUNDS
+ if has_curr_ssaflag(frame, IR_FLAG_INBOUNDS)
+ effects = Effects(effects; noub=ALWAYS_FALSE)
+ elseif !propagate_inbounds(frame)
+ # The callee read our inbounds flag, but unless we propagate inbounds,
+ # we ourselves don't read our parent's inbounds.
+ effects = Effects(effects; noub=ALWAYS_TRUE)
+ end
+ end
+ @assert !isa(rt, TypeVar) "unhandled TypeVar"
+ rt = maybe_singleton_const(rt)
+ if !isempty(frame.pclimitations)
+ if rt isa Const || rt === Union{}
+ empty!(frame.pclimitations)
+ else
+ rt = LimitedAccuracy(rt, frame.pclimitations)
+ frame.pclimitations = IdSet{InferenceState}()
+ end
+ end
+ end
+ end
+ if lhs !== nothing && rt !== Bottom
+ changes = StateUpdate(lhs::SlotNumber, VarState(rt, false))
+ end
+ end
+ return AbstractEvalBasicStatementResult(rt, exct, effects, changes, refinements, currsaw_latestworld)
+end
+
struct BestguessInfo{Interp<:AbstractInterpreter}
interp::Interp
bestguess
@@ -3883,14 +4103,16 @@ end
# make as much progress on `frame` as possible (without handling cycles)
struct CurrentState
- result::Future
+ result::Future{RTEffects}
currstate::VarTable
currsaw_latestworld::Bool
bbstart::Int
bbend::Int
- CurrentState(result::Future, currstate::VarTable, currsaw_latestworld::Bool, bbstart::Int, bbend::Int) = new(result, currstate, currsaw_latestworld, bbstart, bbend)
+ CurrentState(result::Future{RTEffects}, currstate::VarTable, currsaw_latestworld::Bool, bbstart::Int, bbend::Int) =
+ new(result, currstate, currsaw_latestworld, bbstart, bbend)
CurrentState() = new()
end
+
function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextresult::CurrentState)
@assert !is_inferred(frame)
W = frame.ip
@@ -3909,7 +4131,9 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr
bbend = nextresult.bbend
currstate = nextresult.currstate
currsaw_latestworld = nextresult.currsaw_latestworld
- @goto injectresult
+ stmt = frame.src.code[currpc]
+ result = abstract_eval_basic_statement(interp, stmt, StatementState(currstate, currsaw_latestworld), frame, nextresult.result)
+ @goto injected_result
end
if currbb != 1
@@ -4062,87 +4286,15 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr
end
# Process non control-flow statements
@assert isempty(frame.tasks)
- rt = nothing
- exct = Bottom
- changes = nothing
- refinements = nothing
- effects = nothing
- if isa(stmt, NewvarNode)
- changes = StateUpdate(stmt.slot, VarState(Bottom, true))
- elseif isa(stmt, PhiNode)
- add_curr_ssaflag!(frame, IR_FLAGS_REMOVABLE)
- # Implement convergence for PhiNodes. In particular, PhiNodes need to tmerge over
- # the incoming values from all iterations, but `abstract_eval_phi` will only tmerge
- # over the first and last iterations. By tmerging in the current old_rt, we ensure that
- # we will not lose an intermediate value.
- rt = abstract_eval_phi(interp, stmt, StatementState(currstate, currsaw_latestworld), frame)
- old_rt = frame.ssavaluetypes[currpc]
- rt = old_rt === NOT_FOUND ? rt : tmerge(typeinf_lattice(interp), old_rt, rt)
+ sstate = StatementState(currstate, currsaw_latestworld)
+ result = abstract_eval_basic_statement(interp, stmt, sstate, frame)
+ if result isa Future{RTEffects}
+ return CurrentState(result, currstate, currsaw_latestworld, bbstart, bbend)
else
- lhs = nothing
- if isexpr(stmt, :(=))
- lhs = stmt.args[1]
- stmt = stmt.args[2]
- end
- if !isa(stmt, Expr)
- (; rt, exct, effects, refinements) = abstract_eval_special_value(interp, stmt, StatementState(currstate, currsaw_latestworld), frame)
- else
- hd = stmt.head
- if hd === :method
- fname = stmt.args[1]
- if isa(fname, SlotNumber)
- changes = StateUpdate(fname, VarState(Any, false))
- end
- elseif (hd === :code_coverage_effect || (
- hd !== :boundscheck && # :boundscheck can be narrowed to Bool
- is_meta_expr(stmt)))
- rt = Nothing
- elseif hd === :latestworld
- currsaw_latestworld = true
- rt = Nothing
- else
- result = abstract_eval_statement_expr(interp, stmt, StatementState(currstate, currsaw_latestworld), frame)::Future
- if !isready(result) || !isempty(frame.tasks)
- return CurrentState(result, currstate, currsaw_latestworld, bbstart, bbend)
- @label injectresult
- # reload local variables
- stmt = frame.src.code[currpc]
- changes = nothing
- lhs = nothing
- if isexpr(stmt, :(=))
- lhs = stmt.args[1]
- stmt = stmt.args[2]
- end
- result = nextresult.result::Future{RTEffects}
- end
- result = result[]
- (; rt, exct, effects, refinements) = result
- if effects.noub === NOUB_IF_NOINBOUNDS
- if has_curr_ssaflag(frame, IR_FLAG_INBOUNDS)
- effects = Effects(effects; noub=ALWAYS_FALSE)
- elseif !propagate_inbounds(frame)
- # The callee read our inbounds flag, but unless we propagate inbounds,
- # we ourselves don't read our parent's inbounds.
- effects = Effects(effects; noub=ALWAYS_TRUE)
- end
- end
- @assert !isa(rt, TypeVar) "unhandled TypeVar"
- rt = maybe_singleton_const(rt)
- if !isempty(frame.pclimitations)
- if rt isa Const || rt === Union{}
- empty!(frame.pclimitations)
- else
- rt = LimitedAccuracy(rt, frame.pclimitations)
- frame.pclimitations = IdSet{InferenceState}()
- end
- end
- end
- end
- effects === nothing || merge_override_effects!(interp, effects, frame)
- if lhs !== nothing && rt !== Bottom
- changes = StateUpdate(lhs::SlotNumber, VarState(rt, false))
- end
+ @label injected_result
+ (; rt, exct, effects, changes, refinements, currsaw_latestworld) = result
end
+ effects === nothing || merge_override_effects!(interp, effects, frame)
if !has_curr_ssaflag(frame, IR_FLAG_NOTHROW)
if exct !== Union{}
update_exc_bestguess!(interp, exct, frame)
@@ -4258,6 +4410,7 @@ end
# make as much progress on `frame` as possible (by handling cycles)
warnlength::Int = 2500
function typeinf(interp::AbstractInterpreter, frame::InferenceState)
+ time_before = _time_ns()
callstack = frame.callstack::Vector{AbsIntState}
nextstates = CurrentState[]
takenext = frame.frameid
@@ -4289,7 +4442,6 @@ function typeinf(interp::AbstractInterpreter, frame::InferenceState)
# get_compileable_sig), but still must be finished up since it may see and
# change the local variables of the InferenceState at currpc, we do this
# even if the nextresult status is already completed.
- continue
elseif isdefined(nextstates[nextstateid], :result) || !isempty(callee.ip)
# Next make progress on this frame
prev = length(callee.tasks) + 1
@@ -4297,16 +4449,23 @@ function typeinf(interp::AbstractInterpreter, frame::InferenceState)
reverse!(callee.tasks, prev)
elseif callee.cycleid == length(callstack)
# With no active ip's and no cycles, frame is done
- finish_nocycle(interp, callee)
+ time_now = _time_ns()
+ callee.time_self_ns += (time_now - time_before)
+ time_before = time_now
+ finish_nocycle(interp, callee, time_before)
callee.frameid == 0 && break
takenext = length(callstack)
nextstateid = takenext + 1 - frame.frameid
#@assert length(nextstates) == nextstateid + 1
#@assert all(i -> !isdefined(nextstates[i], :result), nextstateid+1:length(nextstates))
resize!(nextstates, nextstateid)
+ continue
elseif callee.cycleid == callee.frameid
# If the current frame is the top part of a cycle, check if the whole cycle
# is done, and if not, pick the next item to work on.
+ time_now = _time_ns()
+ callee.time_self_ns += (time_now - time_before)
+ time_before = time_now
no_active_ips_in_cycle = true
for i = callee.cycleid:length(callstack)
caller = callstack[i]::InferenceState
@@ -4317,7 +4476,7 @@ function typeinf(interp::AbstractInterpreter, frame::InferenceState)
end
end
if no_active_ips_in_cycle
- finish_cycle(interp, callstack, callee.cycleid)
+ finish_cycle(interp, callstack, callee.cycleid, time_before)
end
takenext = length(callstack)
nextstateid = takenext + 1 - frame.frameid
@@ -4327,10 +4486,14 @@ function typeinf(interp::AbstractInterpreter, frame::InferenceState)
else
#@assert length(nextstates) == nextstateid
end
+ continue
else
# Continue to the next frame in this cycle
takenext = takenext - 1
end
+ time_now = _time_ns()
+ callee.time_self_ns += (time_now - time_before)
+ time_before = time_now
end
#@assert all(nextresult -> !isdefined(nextresult, :result), nextstates)
return is_inferred(frame)
diff --git a/Compiler/src/bootstrap.jl b/Compiler/src/bootstrap.jl
index c35de48cd20f5..a847d1fb835c7 100644
--- a/Compiler/src/bootstrap.jl
+++ b/Compiler/src/bootstrap.jl
@@ -67,17 +67,10 @@ function bootstrap!()
end
mi = specialize_method(m.method, Tuple{params...}, m.sparams)
#isa_compileable_sig(mi) || println(stderr, "WARNING: inferring `", mi, "` which isn't expected to be called.")
- push!(methods, mi)
+ typeinf_ext_toplevel(mi, world, isa_compileable_sig(mi) ? SOURCE_MODE_ABI : SOURCE_MODE_NOT_REQUIRED)
end
end
end
- codeinfos = typeinf_ext_toplevel(methods, [world], false)
- for i = 1:2:length(codeinfos)
- ci = codeinfos[i]::CodeInstance
- src = codeinfos[i + 1]::CodeInfo
- isa_compileable_sig(ci.def) || continue # println(stderr, "WARNING: compiling `", ci.def, "` which isn't expected to be called.")
- ccall(:jl_add_codeinst_to_jit, Cvoid, (Any, Any), ci, src)
- end
endtime = time()
println("Base.Compiler ──── ", sub_float(endtime,starttime), " seconds")
end
diff --git a/Compiler/src/cicache.jl b/Compiler/src/cicache.jl
index 2893be2787b29..9c528bc0ae822 100644
--- a/Compiler/src/cicache.jl
+++ b/Compiler/src/cicache.jl
@@ -40,6 +40,14 @@ function intersect(a::WorldRange, b::WorldRange)
return ret
end
+function union(a::WorldRange, b::WorldRange)
+ if b.min_world < a.min_world
+ (b, a) = (a, b)
+ end
+ @assert a.max_world >= b.min_world - 1
+ return WorldRange(a.min_world, b.max_world)
+end
+
"""
struct WorldView
diff --git a/Compiler/src/inferenceresult.jl b/Compiler/src/inferenceresult.jl
index 7da96c4cc2e93..77f897e4035a5 100644
--- a/Compiler/src/inferenceresult.jl
+++ b/Compiler/src/inferenceresult.jl
@@ -183,7 +183,8 @@ function cache_lookup(𝕃::AbstractLattice, mi::MethodInstance, given_argtypes:
method = mi.def::Method
nargtypes = length(given_argtypes)
for cached_result in cache
- cached_result.linfo === mi || @goto next_cache
+ cached_result.tombstone && continue # ignore deleted entries (due to LimitedAccuracy)
+ cached_result.linfo === mi || continue
cache_argtypes = cached_result.argtypes
@assert length(cache_argtypes) == nargtypes "invalid `cache_argtypes` for `mi`"
cache_overridden_by_const = cached_result.overridden_by_const::BitVector
diff --git a/Compiler/src/inferencestate.jl b/Compiler/src/inferencestate.jl
index 0ea0fc684b689..dc9430169ab82 100644
--- a/Compiler/src/inferencestate.jl
+++ b/Compiler/src/inferencestate.jl
@@ -292,7 +292,7 @@ mutable struct InferenceState
# IPO tracking of in-process work, shared with all frames given AbstractInterpreter
callstack #::Vector{AbsIntState}
- parentid::Int # index into callstack of the parent frame that originally added this frame (call frame_parent to extract the current parent of the SCC)
+ parentid::Int # index into callstack of the parent frame that originally added this frame (call cycle_parent to extract the current parent of the SCC)
frameid::Int # index into callstack at which this object is found (or zero, if this is not a cached frame and has no parent)
cycleid::Int # index into the callstack of the topmost frame in the cycle (all frames in the same cycle share the same cycleid)
@@ -302,6 +302,10 @@ mutable struct InferenceState
bestguess #::Type
exc_bestguess
ipo_effects::Effects
+ time_start::UInt64
+ time_caches::Float64
+ time_paused::UInt64
+ time_self_ns::UInt64
#= flags =#
# Whether to restrict inference of abstract call sites to avoid excessive work
@@ -392,6 +396,7 @@ mutable struct InferenceState
currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, bb_saw_latestworld, ssavaluetypes, ssaflags, edges, stmt_info,
tasks, pclimitations, limitations, cycle_backedges, callstack, parentid, frameid, cycleid,
result, unreachable, bestguess, exc_bestguess, ipo_effects,
+ _time_ns(), 0.0, 0, 0,
restrict_abstract_call_sites, cache_mode, insert_coverage,
interp)
@@ -570,21 +575,23 @@ function (::ComputeTryCatch{Handler})(code::Vector{Any}, bbs::Union{Vector{Basic
end
# check if coverage mode is enabled
-function should_insert_coverage(mod::Module, debuginfo::DebugInfo)
- coverage_enabled(mod) && return true
- JLOptions().code_coverage == 3 || return false
+should_insert_coverage(mod::Module, debuginfo::DebugInfo) = should_instrument(mod, debuginfo, true)
+
+function should_instrument(mod::Module, debuginfo::DebugInfo, only_if_affects_optimizer::Bool=false)
+ instrumentation_enabled(mod, only_if_affects_optimizer) && return true
+ JLOptions().code_coverage == 3 || JLOptions().malloc_log == 3 || return false
# path-specific coverage mode: if any line falls in a tracked file enable coverage for all
- return _should_insert_coverage(debuginfo)
+ return _should_instrument(debuginfo)
end
-_should_insert_coverage(mod::Symbol) = is_file_tracked(mod)
-_should_insert_coverage(mod::Method) = _should_insert_coverage(mod.file)
-_should_insert_coverage(mod::MethodInstance) = _should_insert_coverage(mod.def)
-_should_insert_coverage(mod::Module) = false
-function _should_insert_coverage(info::DebugInfo)
+_should_instrument(loc::Symbol) = is_file_tracked(loc)
+_should_instrument(loc::Method) = _should_instrument(loc.file)
+_should_instrument(loc::MethodInstance) = _should_instrument(loc.def)
+_should_instrument(loc::Module) = false
+function _should_instrument(info::DebugInfo)
linetable = info.linetable
- linetable === nothing || (_should_insert_coverage(linetable) && return true)
- _should_insert_coverage(info.def) && return true
+ linetable === nothing || (_should_instrument(linetable) && return true)
+ _should_instrument(info.def) && return true
return false
end
@@ -815,6 +822,8 @@ mutable struct IRInterpretationState
const mi::MethodInstance
world::WorldWithRange
curridx::Int
+ time_caches::Float64
+ time_paused::UInt64
const argtypes_refined::Vector{Bool}
const sptypes::Vector{VarState}
const tpdum::TwoPhaseDefUseMap
@@ -849,7 +858,8 @@ mutable struct IRInterpretationState
tasks = WorkThunk[]
edges = Any[]
callstack = AbsIntState[]
- return new(spec_info, ir, mi, WorldWithRange(world, valid_worlds), curridx, argtypes_refined, ir.sptypes, tpdum,
+ return new(spec_info, ir, mi, WorldWithRange(world, valid_worlds),
+ curridx, 0.0, 0, argtypes_refined, ir.sptypes, tpdum,
ssa_refined, lazyreachability, tasks, edges, callstack, 0, 0)
end
end
@@ -908,14 +918,17 @@ function frame_module(sv::AbsIntState)
return def.module
end
-function frame_parent(sv::InferenceState)
+frame_parent(sv::AbsIntState) = sv.parentid == 0 ? nothing : (sv.callstack::Vector{AbsIntState})[sv.parentid]
+
+function cycle_parent(sv::InferenceState)
sv.parentid == 0 && return nothing
callstack = sv.callstack::Vector{AbsIntState}
sv = callstack[sv.cycleid]::InferenceState
sv.parentid == 0 && return nothing
return callstack[sv.parentid]
end
-frame_parent(sv::IRInterpretationState) = sv.parentid == 0 ? nothing : (sv.callstack::Vector{AbsIntState})[sv.parentid]
+cycle_parent(sv::IRInterpretationState) = frame_parent(sv)
+
# add the orphan child to the parent and the parent to the child
function assign_parentchild!(child::InferenceState, parent::AbsIntState)
@@ -986,12 +999,12 @@ ascending the tree from the given `AbsIntState`).
Note that cycles may be visited in any order.
"""
struct AbsIntStackUnwind
- sv::AbsIntState
+ callstack::Vector{AbsIntState}
+ AbsIntStackUnwind(sv::AbsIntState) = new(sv.callstack::Vector{AbsIntState})
end
-iterate(unw::AbsIntStackUnwind) = (unw.sv, length(unw.sv.callstack::Vector{AbsIntState}))
-function iterate(unw::AbsIntStackUnwind, frame::Int)
+function iterate(unw::AbsIntStackUnwind, frame::Int=length(unw.callstack))
frame == 0 && return nothing
- return ((unw.sv.callstack::Vector{AbsIntState})[frame], frame - 1)
+ return (unw.callstack[frame], frame - 1)
end
struct AbsIntCycle
diff --git a/Compiler/src/ssair/EscapeAnalysis.jl b/Compiler/src/ssair/EscapeAnalysis.jl
index af8e9b1a4959e..4ce972937700c 100644
--- a/Compiler/src/ssair/EscapeAnalysis.jl
+++ b/Compiler/src/ssair/EscapeAnalysis.jl
@@ -15,6 +15,7 @@ using Base: Base
# imports
import Base: ==, copy, getindex, setindex!
# usings
+using Core
using Core: Builtin, IntrinsicFunction, SimpleVector, ifelse, sizeof
using Core.IR
using Base: # Base definitions
diff --git a/Compiler/src/ssair/ir.jl b/Compiler/src/ssair/ir.jl
index f86ada2309ddc..e6c8f3a6d2c78 100644
--- a/Compiler/src/ssair/ir.jl
+++ b/Compiler/src/ssair/ir.jl
@@ -581,7 +581,7 @@ function is_relevant_expr(e::Expr)
:foreigncall, :isdefined, :copyast,
:throw_undef_if_not,
:cfunction, :method, :pop_exception,
- :leave,
+ :leave, :const, :globaldecl,
:new_opaque_closure)
end
diff --git a/Compiler/src/ssair/irinterp.jl b/Compiler/src/ssair/irinterp.jl
index a4969e81828cc..3d72da72625be 100644
--- a/Compiler/src/ssair/irinterp.jl
+++ b/Compiler/src/ssair/irinterp.jl
@@ -1,7 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
function collect_limitations!(@nospecialize(typ), ::IRInterpretationState)
- @assert !isa(typ, LimitedAccuracy) "irinterp is unable to handle heavy recursion"
+ @assert !isa(typ, LimitedAccuracy) "irinterp is unable to handle heavy recursion correctly"
return typ
end
@@ -32,7 +32,7 @@ function concrete_eval_invoke(interp::AbstractInterpreter, ci::CodeInstance, arg
end
function abstract_eval_invoke_inst(interp::AbstractInterpreter, inst::Instruction, irsv::IRInterpretationState)
- stmt = inst[:stmt]
+ stmt = inst[:stmt]::Expr
ci = stmt.args[1]
if ci isa MethodInstance
world = frame_world(irsv)
@@ -212,6 +212,7 @@ function reprocess_instruction!(interp::AbstractInterpreter, inst::Instruction,
else
rt = argextype(stmt, irsv.ir)
end
+ @assert !(rt isa LimitedAccuracy)
if rt !== nothing
if has_flag(inst, IR_FLAG_UNUSED)
# Don't bother checking the type if we know it's unused
diff --git a/Compiler/src/ssair/passes.jl b/Compiler/src/ssair/passes.jl
index c9b3d5515caa3..46ed299167060 100644
--- a/Compiler/src/ssair/passes.jl
+++ b/Compiler/src/ssair/passes.jl
@@ -183,7 +183,7 @@ function find_def_for_use(
end
function collect_leaves(compact::IncrementalCompact, @nospecialize(val), @nospecialize(typeconstraint), 𝕃ₒ::AbstractLattice,
- predecessors = ((@nospecialize(def), compact::IncrementalCompact) -> isa(def, PhiNode) ? def.values : nothing))
+ predecessors::Pre = ((@nospecialize(def), compact::IncrementalCompact) -> isa(def, PhiNode) ? def.values : nothing)) where {Pre}
if isa(val, Union{OldSSAValue, SSAValue})
val, typeconstraint = simple_walk_constraint(compact, val, typeconstraint)
end
@@ -271,7 +271,7 @@ Starting at `val` walk use-def chains to get all the leaves feeding into this `v
`predecessors(def, compact)` is a callback which should return the set of possible
predecessors for a "phi-like" node (PhiNode or Core.ifelse) or `nothing` otherwise.
"""
-function walk_to_defs(compact::IncrementalCompact, @nospecialize(defssa), @nospecialize(typeconstraint), predecessors, 𝕃ₒ::AbstractLattice)
+function walk_to_defs(compact::IncrementalCompact, @nospecialize(defssa), @nospecialize(typeconstraint), predecessors::Pre, 𝕃ₒ::AbstractLattice) where {Pre}
visited_philikes = AnySSAValue[]
isa(defssa, AnySSAValue) || return Any[defssa], visited_philikes
def = compact[defssa][:stmt]
@@ -1027,17 +1027,19 @@ end
sig = sig.body
isa(sig, DataType) || return nothing
sig.name === Tuple.name || return nothing
- length(sig.parameters) >= 1 || return nothing
+ sig_parameters = sig.parameters::SimpleVector
+ length_sig_parameters = length(sig_parameters)
+ length_sig_parameters >= 1 || return nothing
- i = let sig=sig
- findfirst(j::Int->has_typevar(sig.parameters[j], tvar), 1:length(sig.parameters))
+ function has_typevar_closure(j::Int)
+ has_typevar(sig_parameters[j], tvar)
end
+
+ i = findfirst(has_typevar_closure, 1:length_sig_parameters)
i === nothing && return nothing
- let sig=sig
- any(j::Int->has_typevar(sig.parameters[j], tvar), i+1:length(sig.parameters))
- end && return nothing
+ any(has_typevar_closure, i+1:length_sig_parameters) && return nothing
- arg = sig.parameters[i]
+ arg = sig_parameters[i]
rarg = def.args[2 + i]
isa(rarg, SSAValue) || return nothing
diff --git a/Compiler/src/ssair/show.jl b/Compiler/src/ssair/show.jl
index e63d7b5cf640e..0688c02eb6440 100644
--- a/Compiler/src/ssair/show.jl
+++ b/Compiler/src/ssair/show.jl
@@ -67,7 +67,7 @@ function builtin_call_has_dispatch(
return true
end
end
- elseif (f === Core._apply_pure || f === Core._call_in_world || f === Core._call_in_world_total || f === Core._call_latest)
+ elseif (f === Core.invoke_in_world || f === Core._call_in_world_total || f === Core.invokelatest)
# These apply-like builtins are effectively dynamic calls
return true
end
diff --git a/Compiler/src/ssair/verify.jl b/Compiler/src/ssair/verify.jl
index 2d6d59cc4e22b..2b8f89173911a 100644
--- a/Compiler/src/ssair/verify.jl
+++ b/Compiler/src/ssair/verify.jl
@@ -1,7 +1,8 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
+irshow_was_loaded() = invokelatest(isdefined, Compiler.IRShow, :debuginfo_firstline)
function maybe_show_ir(ir::IRCode)
- if isdefined(Core, :Main) && isdefined(Core.Main, :Base)
+ if irshow_was_loaded()
# ensure we use I/O that does not yield, as this gets called during compilation
invokelatest(Core.Main.Base.show, Core.stdout, "text/plain", ir)
else
@@ -61,18 +62,14 @@ function check_op(ir::IRCode, domtree::DomTree, @nospecialize(op), use_bb::Int,
raise_error()
end
elseif isa(op, GlobalRef)
- force_binding_resolution!(op, min_world(ir.valid_worlds))
- bpart = lookup_binding_partition(min_world(ir.valid_worlds), op)
- while is_some_imported(binding_kind(bpart)) && max_world(ir.valid_worlds) <= bpart.max_world
- imported_binding = partition_restriction(bpart)::Core.Binding
- bpart = lookup_binding_partition(min_world(ir.valid_worlds), imported_binding)
- end
- if (!is_defined_const_binding(binding_kind(bpart)) || (bpart.max_world < max_world(ir.valid_worlds))) &&
- (op.mod !== Core) && (op.mod !== Base)
- # Core and Base are excluded because the frontend uses them for intrinsics, etc.
- # TODO: Decide which way to go with these.
- @verify_error "Unbound or partitioned GlobalRef not allowed in value position"
- raise_error()
+ if op.mod !== Core && op.mod !== Base
+ (valid_worlds, alldef) = scan_leaf_partitions(nothing, op, WorldWithRange(min_world(ir.valid_worlds), ir.valid_worlds)) do _, _, bpart
+ is_defined_const_binding(binding_kind(bpart))
+ end
+ if !alldef || max_world(valid_worlds) < max_world(ir.valid_worlds) || min_world(valid_worlds) > min_world(ir.valid_worlds)
+ @verify_error "Unbound or partitioned GlobalRef not allowed in value position"
+ raise_error()
+ end
end
elseif isa(op, Expr)
# Only Expr(:boundscheck) is allowed in value position
@@ -105,15 +102,16 @@ function count_int(val::Int, arr::Vector{Int})
n
end
+_debuginfo_firstline(debuginfo::Union{DebugInfo,DebugInfoStream}) = IRShow.debuginfo_firstline(debuginfo)
function verify_ir(ir::IRCode, print::Bool=true,
allow_frontend_forms::Bool=false,
𝕃ₒ::AbstractLattice = SimpleInferenceLattice.instance,
mi::Union{Nothing,MethodInstance}=nothing)
function raise_error()
error_args = Any["IR verification failed."]
- if isdefined(Core, :Main) && isdefined(Core.Main, :Base)
+ if irshow_was_loaded()
# ensure we use I/O that does not yield, as this gets called during compilation
- firstline = invokelatest(IRShow.debuginfo_firstline, ir.debuginfo)
+ firstline = invokelatest(_debuginfo_firstline, ir.debuginfo)
else
firstline = nothing
end
diff --git a/Compiler/src/stmtinfo.jl b/Compiler/src/stmtinfo.jl
index 58c7c7b3fea11..6a85bc6605d3f 100644
--- a/Compiler/src/stmtinfo.jl
+++ b/Compiler/src/stmtinfo.jl
@@ -490,9 +490,7 @@ perform such accesses.
"""
struct GlobalAccessInfo <: CallInfo
b::Core.Binding
- bpart::Core.BindingPartition
end
-GlobalAccessInfo(::Core.Binding, ::Nothing) = NoCallInfo()
function add_edges_impl(edges::Vector{Any}, info::GlobalAccessInfo)
push!(edges, info.b)
end
diff --git a/Compiler/src/tfuncs.jl b/Compiler/src/tfuncs.jl
index 50b88bb0222ce..e9fe671b2f781 100644
--- a/Compiler/src/tfuncs.jl
+++ b/Compiler/src/tfuncs.jl
@@ -449,6 +449,10 @@ end
return Const(true)
end
end
+ # datatype_fieldcount is what `fieldcount` uses internally; it returns
+ # `nothing` (which is `!== 0`) when the field count is not definite.
+ elseif datatype_fieldcount(a1) === 0
+ return Const(false)
end
elseif isa(a1, Union)
# Results can only be `Const` or `Bool`
@@ -572,6 +576,16 @@ end
add_tfunc(nfields, 1, 1, nfields_tfunc, 1)
add_tfunc(Core._expr, 1, INT_INF, @nospecs((𝕃::AbstractLattice, args...)->Expr), 100)
add_tfunc(svec, 0, INT_INF, @nospecs((𝕃::AbstractLattice, args...)->SimpleVector), 20)
+@nospecs function _svec_ref_tfunc(𝕃::AbstractLattice, s, i)
+ if isa(s, Const) && isa(i, Const)
+ s, i = s.val, i.val
+ if isa(s, SimpleVector) && isa(i, Int)
+ return 1 ≤ i ≤ length(s) ? Const(s[i]) : Bottom
+ end
+ end
+ return Any
+end
+add_tfunc(Core._svec_ref, 2, 2, _svec_ref_tfunc, 1)
@nospecs function typevar_tfunc(𝕃::AbstractLattice, n, lb_arg, ub_arg)
lb = Union{}
ub = Any
@@ -1073,17 +1087,15 @@ end
end
@nospecs function getfield_tfunc(𝕃::AbstractLattice, s00, name, boundscheck_or_order)
- t = isvarargtype(boundscheck_or_order) ? unwrapva(boundscheck_or_order) :
- widenconst(boundscheck_or_order)
- hasintersect(t, Symbol) || hasintersect(t, Bool) || return Bottom
+ if !isvarargtype(boundscheck_or_order)
+ t = widenconst(boundscheck_or_order)
+ hasintersect(t, Symbol) || hasintersect(t, Bool) || return Bottom
+ end
return getfield_tfunc(𝕃, s00, name)
end
@nospecs function getfield_tfunc(𝕃::AbstractLattice, s00, name, order, boundscheck)
hasintersect(widenconst(order), Symbol) || return Bottom
- if isvarargtype(boundscheck)
- t = unwrapva(boundscheck)
- hasintersect(t, Symbol) || hasintersect(t, Bool) || return Bottom
- else
+ if !isvarargtype(boundscheck)
hasintersect(widenconst(boundscheck), Bool) || return Bottom
end
return getfield_tfunc(𝕃, s00, name)
@@ -2338,6 +2350,9 @@ function _builtin_nothrow(𝕃::AbstractLattice, @nospecialize(f::Builtin), argt
elseif f === Core.compilerbarrier
na == 2 || return false
return compilerbarrier_nothrow(argtypes[1], nothing)
+ elseif f === Core._svec_ref
+ na == 2 || return false
+ return _svec_ref_tfunc(𝕃, argtypes[1], argtypes[2]) isa Const
end
return false
end
@@ -2368,7 +2383,9 @@ const _CONSISTENT_BUILTINS = Any[
throw,
Core.throw_methoderror,
setfield!,
- donotdelete
+ donotdelete,
+ memoryrefnew,
+ memoryrefoffset,
]
# known to be effect-free (but not necessarily nothrow)
@@ -2393,6 +2410,7 @@ const _EFFECT_FREE_BUILTINS = [
Core.throw_methoderror,
getglobal,
compilerbarrier,
+ Core._svec_ref,
]
const _INACCESSIBLEMEM_BUILTINS = Any[
@@ -2426,14 +2444,12 @@ const _ARGMEM_BUILTINS = Any[
replacefield!,
setfield!,
swapfield!,
+ Core._svec_ref,
]
const _INCONSISTENT_INTRINSICS = Any[
- Intrinsics.pointerref, # this one is volatile
- Intrinsics.sqrt_llvm_fast, # this one may differ at runtime (by a few ulps)
- Intrinsics.have_fma, # this one depends on the runtime environment
- Intrinsics.cglobal, # cglobal lookup answer changes at runtime
- # ... and list fastmath intrinsics:
+ # all is_pure_intrinsic_infer plus
+ # ... all the unsound fastmath functions which should have been in is_pure_intrinsic_infer
# join(string.("Intrinsics.", sort(filter(endswith("_fast")∘string, names(Core.Intrinsics)))), ",\n")
Intrinsics.add_float_fast,
Intrinsics.div_float_fast,
@@ -2454,6 +2470,43 @@ const _SPECIAL_BUILTINS = Any[
Core._apply_iterate,
]
+# Intrinsics that require all arguments to be floats
+const _FLOAT_INTRINSICS = Any[
+ Intrinsics.neg_float,
+ Intrinsics.add_float,
+ Intrinsics.sub_float,
+ Intrinsics.mul_float,
+ Intrinsics.div_float,
+ Intrinsics.min_float,
+ Intrinsics.max_float,
+ Intrinsics.fma_float,
+ Intrinsics.muladd_float,
+ Intrinsics.neg_float_fast,
+ Intrinsics.add_float_fast,
+ Intrinsics.sub_float_fast,
+ Intrinsics.mul_float_fast,
+ Intrinsics.div_float_fast,
+ Intrinsics.min_float_fast,
+ Intrinsics.max_float_fast,
+ Intrinsics.eq_float,
+ Intrinsics.ne_float,
+ Intrinsics.lt_float,
+ Intrinsics.le_float,
+ Intrinsics.eq_float_fast,
+ Intrinsics.ne_float_fast,
+ Intrinsics.lt_float_fast,
+ Intrinsics.le_float_fast,
+ Intrinsics.fpiseq,
+ Intrinsics.abs_float,
+ Intrinsics.copysign_float,
+ Intrinsics.ceil_llvm,
+ Intrinsics.floor_llvm,
+ Intrinsics.trunc_llvm,
+ Intrinsics.rint_llvm,
+ Intrinsics.sqrt_llvm,
+ Intrinsics.sqrt_llvm_fast
+]
+
# Types compatible with fpext/fptrunc
const CORE_FLOAT_TYPES = Union{Core.BFloat16, Float16, Float32, Float64}
@@ -2575,9 +2628,7 @@ function builtin_effects(𝕃::AbstractLattice, @nospecialize(f::Builtin), argty
else
if contains_is(_CONSISTENT_BUILTINS, f)
consistent = ALWAYS_TRUE
- elseif f === memoryrefnew || f === memoryrefoffset
- consistent = ALWAYS_TRUE
- elseif f === memoryrefget || f === memoryrefset! || f === memoryref_isassigned
+ elseif f === memoryrefget || f === memoryrefset! || f === memoryref_isassigned || f === Core._svec_ref
consistent = CONSISTENT_IF_INACCESSIBLEMEMONLY
elseif f === Core._typevar || f === Core.memorynew
consistent = CONSISTENT_IF_NOTRETURNED
@@ -2763,6 +2814,8 @@ _istypemin(@nospecialize x) = !_iszero(x) && Intrinsics.neg_int(x) === x
function builtin_exct(𝕃::AbstractLattice, @nospecialize(f::Builtin), argtypes::Vector{Any}, @nospecialize(rt))
if isa(f, IntrinsicFunction)
return intrinsic_exct(𝕃, f, argtypes)
+ elseif f === Core._svec_ref
+ return BoundsError
end
return Any
end
@@ -2871,7 +2924,8 @@ function intrinsic_exct(𝕃::AbstractLattice, f::IntrinsicFunction, argtypes::V
return ErrorException
end
- # fpext and fptrunc have further restrictions on the allowed types.
+ # fpext, fptrunc, fptoui, fptosi, uitofp, and sitofp have further
+ # restrictions on the allowed types.
if f === Intrinsics.fpext &&
!(ty <: CORE_FLOAT_TYPES && xty <: CORE_FLOAT_TYPES && Core.sizeof(ty) > Core.sizeof(xty))
return ErrorException
@@ -2880,6 +2934,12 @@ function intrinsic_exct(𝕃::AbstractLattice, f::IntrinsicFunction, argtypes::V
!(ty <: CORE_FLOAT_TYPES && xty <: CORE_FLOAT_TYPES && Core.sizeof(ty) < Core.sizeof(xty))
return ErrorException
end
+ if (f === Intrinsics.fptoui || f === Intrinsics.fptosi) && !(xty <: CORE_FLOAT_TYPES)
+ return ErrorException
+ end
+ if (f === Intrinsics.uitofp || f === Intrinsics.sitofp) && !(ty <: CORE_FLOAT_TYPES)
+ return ErrorException
+ end
return Union{}
end
@@ -2892,11 +2952,15 @@ function intrinsic_exct(𝕃::AbstractLattice, f::IntrinsicFunction, argtypes::V
return Union{}
end
- # The remaining intrinsics are math/bits/comparison intrinsics. They work on all
- # primitive types of the same type.
+ # The remaining intrinsics are math/bits/comparison intrinsics.
+ # All the non-floating point intrinsics work on primitive values of the same type.
isshift = f === shl_int || f === lshr_int || f === ashr_int
argtype1 = widenconst(argtypes[1])
isprimitivetype(argtype1) || return ErrorException
+ if contains_is(_FLOAT_INTRINSICS, f)
+ argtype1 <: CORE_FLOAT_TYPES || return ErrorException
+ end
+
for i = 2:length(argtypes)
argtype = widenconst(argtypes[i])
if isshift ? !isprimitivetype(argtype) : argtype !== argtype1
@@ -2910,36 +2974,48 @@ function intrinsic_nothrow(f::IntrinsicFunction, argtypes::Vector{Any})
return intrinsic_exct(SimpleInferenceLattice.instance, f, argtypes) === Union{}
end
-# whether `f` is pure for inference
-function is_pure_intrinsic_infer(f::IntrinsicFunction)
- return !(f === Intrinsics.pointerref || # this one is volatile
- f === Intrinsics.pointerset || # this one is never effect-free
- f === Intrinsics.llvmcall || # this one is never effect-free
- f === Intrinsics.sqrt_llvm_fast || # this one may differ at runtime (by a few ulps)
- f === Intrinsics.have_fma || # this one depends on the runtime environment
- f === Intrinsics.cglobal) # cglobal lookup answer changes at runtime
+function _is_effect_free_infer(f::IntrinsicFunction)
+ return !(f === Intrinsics.pointerset ||
+ f === Intrinsics.atomic_pointerref ||
+ f === Intrinsics.atomic_pointerset ||
+ f === Intrinsics.atomic_pointerswap ||
+ # f === Intrinsics.atomic_pointermodify ||
+ f === Intrinsics.atomic_pointerreplace ||
+ f === Intrinsics.atomic_fence)
end
-# whether `f` is effect free if nothrow
-function intrinsic_effect_free_if_nothrow(@nospecialize f)
- return f === Intrinsics.pointerref ||
- f === Intrinsics.have_fma ||
- is_pure_intrinsic_infer(f)
+# whether `f` is pure for inference
+function is_pure_intrinsic_infer(f::IntrinsicFunction, is_effect_free::Union{Nothing,Bool}=nothing)
+ if is_effect_free === nothing
+ is_effect_free = _is_effect_free_infer(f)
+ end
+ return is_effect_free && !(
+ f === Intrinsics.llvmcall || # can do arbitrary things
+ f === Intrinsics.atomic_pointermodify || # can do arbitrary things
+ f === Intrinsics.pointerref || # this one is volatile
+ f === Intrinsics.sqrt_llvm_fast || # this one may differ at runtime (by a few ulps)
+ f === Intrinsics.have_fma || # this one depends on the runtime environment
+ f === Intrinsics.cglobal) # cglobal lookup answer changes at runtime
end
function intrinsic_effects(f::IntrinsicFunction, argtypes::Vector{Any})
if f === Intrinsics.llvmcall
# llvmcall can do arbitrary things
return Effects()
+ elseif f === atomic_pointermodify
+ # atomic_pointermodify has memory effects, plus any effects from the ModifyOpInfo
+ return Effects()
end
- if contains_is(_INCONSISTENT_INTRINSICS, f)
- consistent = ALWAYS_FALSE
- else
+ is_effect_free = _is_effect_free_infer(f)
+ effect_free = is_effect_free ? ALWAYS_TRUE : ALWAYS_FALSE
+ if ((is_pure_intrinsic_infer(f, is_effect_free) && !contains_is(_INCONSISTENT_INTRINSICS, f)) ||
+ f === Intrinsics.pointerset || f === Intrinsics.atomic_pointerset || f === Intrinsics.atomic_fence)
consistent = ALWAYS_TRUE
+ else
+ consistent = ALWAYS_FALSE
end
- effect_free = !(f === Intrinsics.pointerset) ? ALWAYS_TRUE : ALWAYS_FALSE
nothrow = intrinsic_nothrow(f, argtypes)
- inaccessiblememonly = ALWAYS_TRUE
+ inaccessiblememonly = is_effect_free && !(f === Intrinsics.pointerref) ? ALWAYS_TRUE : ALWAYS_FALSE
return Effects(EFFECTS_TOTAL; consistent, effect_free, nothrow, inaccessiblememonly)
end
diff --git a/Compiler/src/typeinfer.jl b/Compiler/src/typeinfer.jl
index 96f58943e3255..debae93089c02 100644
--- a/Compiler/src/typeinfer.jl
+++ b/Compiler/src/typeinfer.jl
@@ -10,8 +10,9 @@ being used for this purpose alone.
"""
module Timings
+using ..Core
using ..Compiler: -, +, :, Vector, length, first, empty!, push!, pop!, @inline,
- @inbounds, copy, backtrace
+ @inbounds, copy, backtrace, _time_ns
# What we record for any given frame we infer during type inference.
struct InferenceFrameInfo
@@ -52,8 +53,6 @@ end
Timing(mi_info, start_time, cur_start_time, time, children) = Timing(mi_info, start_time, cur_start_time, time, children, nothing)
Timing(mi_info, start_time) = Timing(mi_info, start_time, start_time, UInt64(0), Timing[])
-_time_ns() = ccall(:jl_hrtime, UInt64, ())
-
# We keep a stack of the Timings for each of the MethodInstances currently being timed.
# Since type inference currently operates via a depth-first search (during abstract
# evaluation), this vector operates like a call stack. The last node in _timings is the
@@ -92,7 +91,7 @@ If set to `true`, record per-method-instance timings within type inference in th
__set_measure_typeinf(onoff::Bool) = __measure_typeinf__[] = onoff
const __measure_typeinf__ = RefValue{Bool}(false)
-function finish!(interp::AbstractInterpreter, caller::InferenceState)
+function finish!(interp::AbstractInterpreter, caller::InferenceState, validation_world::UInt, time_before::UInt64)
result = caller.result
opt = result.src
if opt isa OptimizationState
@@ -108,12 +107,7 @@ function finish!(interp::AbstractInterpreter, caller::InferenceState)
ci = result.ci
# if we aren't cached, we don't need this edge
# but our caller might, so let's just make it anyways
- if last(result.valid_worlds) >= get_world_counter()
- # TODO: this should probably come after all store_backedges (after optimizations) for the entire graph in finish_cycle
- # since we should be requiring that all edges first get their backedges set, as a batch
- result.valid_worlds = WorldRange(first(result.valid_worlds), typemax(UInt))
- end
- if last(result.valid_worlds) == typemax(UInt)
+ if last(result.valid_worlds) >= validation_world
# if we can record all of the backedges in the global reverse-cache,
# we can now widen our applicability in the global cache too
store_backedges(ci, edges)
@@ -143,19 +137,23 @@ function finish!(interp::AbstractInterpreter, caller::InferenceState)
if !@isdefined di
di = DebugInfo(result.linfo)
end
- ccall(:jl_update_codeinst, Cvoid, (Any, Any, Int32, UInt, UInt, UInt32, Any, Any, Any),
+ time_now = _time_ns()
+ time_self_ns = caller.time_self_ns + (time_now - time_before)
+ time_total = (time_now - caller.time_start - caller.time_paused) * 1e-9
+ ccall(:jl_update_codeinst, Cvoid, (Any, Any, Int32, UInt, UInt, UInt32, Any, Float64, Float64, Float64, Any, Any),
ci, inferred_result, const_flag, first(result.valid_worlds), last(result.valid_worlds), encode_effects(result.ipo_effects),
- result.analysis_results, di, edges)
+ result.analysis_results, time_total, caller.time_caches, time_self_ns * 1e-9, di, edges)
engine_reject(interp, ci)
- if !discard_src && isdefined(interp, :codegen) && uncompressed isa CodeInfo
+ codegen = codegen_cache(interp)
+ if !discard_src && codegen !== nothing && uncompressed isa CodeInfo
# record that the caller could use this result to generate code when required, if desired, to avoid repeating n^2 work
- interp.codegen[ci] = uncompressed
+ codegen[ci] = uncompressed
if bootstrapping_compiler && inferred_result == nothing
# This is necessary to get decent bootstrapping performance
# when compiling the compiler to inject everything eagerly
# where codegen can start finding and using it right away
mi = result.linfo
- if mi.def isa Method && isa_compileable_sig(mi)
+ if mi.def isa Method && isa_compileable_sig(mi) && is_cached(caller)
ccall(:jl_add_codeinst_to_jit, Cvoid, (Any, Any), ci, uncompressed)
end
end
@@ -186,23 +184,31 @@ function finish!(interp::AbstractInterpreter, mi::MethodInstance, ci::CodeInstan
end
ccall(:jl_fill_codeinst, Cvoid, (Any, Any, Any, Any, Int32, UInt, UInt, UInt32, Any, Any, Any),
ci, rettype, exctype, nothing, const_flags, min_world, max_world, ipo_effects, nothing, di, edges)
- ccall(:jl_update_codeinst, Cvoid, (Any, Any, Int32, UInt, UInt, UInt32, Any, Any, Any),
- ci, nothing, const_flag, min_world, max_world, ipo_effects, nothing, di, edges)
+ ccall(:jl_update_codeinst, Cvoid, (Any, Any, Int32, UInt, UInt, UInt32, Any, Float64, Float64, Float64, Any, Any),
+ ci, nothing, const_flag, min_world, max_world, ipo_effects, nothing, 0.0, 0.0, 0.0, di, edges)
code_cache(interp)[mi] = ci
- if isdefined(interp, :codegen)
- interp.codegen[ci] = src
+ codegen = codegen_cache(interp)
+ if codegen !== nothing
+ codegen[ci] = src
end
engine_reject(interp, ci)
return nothing
end
-function finish_nocycle(::AbstractInterpreter, frame::InferenceState)
- finishinfer!(frame, frame.interp)
+function finish_nocycle(::AbstractInterpreter, frame::InferenceState, time_before::UInt64)
+ finishinfer!(frame, frame.interp, frame.cycleid)
opt = frame.result.src
if opt isa OptimizationState # implies `may_optimize(caller.interp) === true`
optimize(frame.interp, opt, frame.result)
end
- finish!(frame.interp, frame)
+ validation_world = get_world_counter()
+ finish!(frame.interp, frame, validation_world, time_before)
+ if isdefined(frame.result, :ci)
+ # After validation, under the world_counter_lock, set max_world to typemax(UInt) for all dependencies
+ # (recursively). From that point onward the ordinary backedge mechanism is responsible for maintaining
+ # validity.
+ ccall(:jl_promote_ci_to_current, Cvoid, (Any, UInt), frame.result.ci, validation_world)
+ end
if frame.cycleid != 0
frames = frame.callstack::Vector{AbsIntState}
@assert frames[end] === frame
@@ -211,7 +217,7 @@ function finish_nocycle(::AbstractInterpreter, frame::InferenceState)
return nothing
end
-function finish_cycle(::AbstractInterpreter, frames::Vector{AbsIntState}, cycleid::Int)
+function finish_cycle(::AbstractInterpreter, frames::Vector{AbsIntState}, cycleid::Int, time_before::UInt64)
cycle_valid_worlds = WorldRange()
cycle_valid_effects = EFFECTS_TOTAL
for frameid = cycleid:length(frames)
@@ -227,19 +233,50 @@ function finish_cycle(::AbstractInterpreter, frames::Vector{AbsIntState}, cyclei
for frameid = cycleid:length(frames)
caller = frames[frameid]::InferenceState
adjust_cycle_frame!(caller, cycle_valid_worlds, cycle_valid_effects)
- finishinfer!(caller, caller.interp)
+ finishinfer!(caller, caller.interp, cycleid)
+ time_now = _time_ns()
+ caller.time_self_ns += (time_now - time_before)
+ time_before = time_now
end
+ time_caches = 0.0 # the total and adjusted time of every entry in the cycle are the same
+ time_paused = UInt64(0)
for frameid = cycleid:length(frames)
caller = frames[frameid]::InferenceState
opt = caller.result.src
if opt isa OptimizationState # implies `may_optimize(caller.interp) === true`
optimize(caller.interp, opt, caller.result)
+ time_now = _time_ns()
+ caller.time_self_ns += (time_now - time_before)
+ time_before = time_now
end
- end
+ time_caches += caller.time_caches
+ time_paused += caller.time_paused
+ caller.time_paused = UInt64(0)
+ caller.time_caches = 0.0
+ end
+ cycletop = frames[cycleid]::InferenceState
+ time_start = cycletop.time_start
+ validation_world = get_world_counter()
+ cis = CodeInstance[]
for frameid = cycleid:length(frames)
caller = frames[frameid]::InferenceState
- finish!(caller.interp, caller)
+ caller.time_start = time_start
+ caller.time_caches = time_caches
+ caller.time_paused = time_paused
+ finish!(caller.interp, caller, validation_world, time_before)
+ if isdefined(caller.result, :ci)
+ push!(cis, caller.result.ci)
+ end
end
+ if cycletop.parentid != 0
+ parent = frames[cycletop.parentid]
+ parent.time_caches += time_caches
+ parent.time_paused += time_paused
+ end
+ # After validation, under the world_counter_lock, set max_world to typemax(UInt) for all dependencies
+ # (recursively). From that point onward the ordinary backedge mechanism is responsible for maintaining
+ # validity.
+ ccall(:jl_promote_cis_to_current, Cvoid, (Ptr{CodeInstance}, Csize_t, UInt), cis, length(cis), validation_world)
resize!(frames, cycleid - 1)
return nothing
end
@@ -298,37 +335,38 @@ function cache_result!(interp::AbstractInterpreter, result::InferenceResult, ci:
return true
end
-function cycle_fix_limited(@nospecialize(typ), sv::InferenceState)
+function cycle_fix_limited(@nospecialize(typ), sv::InferenceState, cycleid::Int)
if typ isa LimitedAccuracy
- if sv.parentid === 0
- # we might have introduced a limit marker, but we should know it must be sv and other callers_in_cycle
- #@assert !isempty(callers_in_cycle(sv))
- # FIXME: this assert fails, appearing to indicate there is a bug in filtering this list earlier.
- # In particular (during doctests for example), during inference of
- # show(Base.IOContext{Base.GenericIOBuffer{Memory{UInt8}}}, Base.Multimedia.MIME{:var"text/plain"}, LinearAlgebra.BunchKaufman{Float64, Array{Float64, 2}, Array{Int64, 1}})
- # we observed one of the ssavaluetypes here to be Core.Compiler.LimitedAccuracy(typ=Any, causes=Core.Compiler.IdSet(getproperty(LinearAlgebra.BunchKaufman{Float64, Array{Float64, 2}, Array{Int64, 1}}, Symbol)))
- return typ.typ
- end
- causes = copy(typ.causes)
- delete!(causes, sv)
- for caller in callers_in_cycle(sv)
- delete!(causes, caller)
- end
- if isempty(causes)
- return typ.typ
+ frames = sv.callstack::Vector{AbsIntState}
+ causes = typ.causes
+ for frameid = cycleid:length(frames)
+ caller = frames[frameid]::InferenceState
+ caller in causes || continue
+ causes === typ.causes && (causes = copy(causes))
+ pop!(causes, caller)
+ if isempty(causes)
+ return typ.typ
+ end
end
- if length(causes) != length(typ.causes)
+ @assert sv.parentid != 0
+ if causes !== typ.causes
return LimitedAccuracy(typ.typ, causes)
end
end
return typ
end
-function adjust_effects(ipo_effects::Effects, def::Method)
+function adjust_effects(ipo_effects::Effects, def::Method, world::UInt)
# override the analyzed effects using manually annotated effect settings
override = decode_effects_override(def.purity)
+ valid_worlds = WorldRange(0, typemax(UInt))
if is_effect_overridden(override, :consistent)
- ipo_effects = Effects(ipo_effects; consistent=ALWAYS_TRUE)
+ # See note on `typemax(Int)` instead of `deleted_world` in adjust_effects!
+ override_valid_worlds = WorldRange(def.primary_world, typemax(Int))
+ if world in override_valid_worlds
+ ipo_effects = Effects(ipo_effects; consistent=ALWAYS_TRUE)
+ valid_worlds = override_valid_worlds
+ end
end
if is_effect_overridden(override, :effect_free)
ipo_effects = Effects(ipo_effects; effect_free=ALWAYS_TRUE)
@@ -356,7 +394,7 @@ function adjust_effects(ipo_effects::Effects, def::Method)
if is_effect_overridden(override, :nortcall)
ipo_effects = Effects(ipo_effects; nortcall=true)
end
- return ipo_effects
+ return (ipo_effects, valid_worlds)
end
function adjust_effects(sv::InferenceState)
@@ -410,7 +448,8 @@ function adjust_effects(sv::InferenceState)
# override the analyzed effects using manually annotated effect settings
def = sv.linfo.def
if isa(def, Method)
- ipo_effects = adjust_effects(ipo_effects, def)
+ (ipo_effects, valid_worlds) = adjust_effects(ipo_effects, def, sv.world.this)
+ update_valid_age!(sv, valid_worlds)
end
return ipo_effects
@@ -425,29 +464,32 @@ const empty_edges = Core.svec()
# inference completed on `me`
# update the MethodInstance
-function finishinfer!(me::InferenceState, interp::AbstractInterpreter)
+function finishinfer!(me::InferenceState, interp::AbstractInterpreter, cycleid::Int)
# prepare to run optimization passes on fulltree
@assert isempty(me.ip)
# inspect whether our inference had a limited result accuracy,
# else it may be suitable to cache
- bestguess = me.bestguess = cycle_fix_limited(me.bestguess, me)
- exc_bestguess = me.exc_bestguess = cycle_fix_limited(me.exc_bestguess, me)
+ bestguess = me.bestguess = cycle_fix_limited(me.bestguess, me, cycleid)
+ exc_bestguess = me.exc_bestguess = cycle_fix_limited(me.exc_bestguess, me, cycleid)
limited_ret = bestguess isa LimitedAccuracy || exc_bestguess isa LimitedAccuracy
limited_src = false
- if !limited_ret
+ if limited_ret
+ @assert me.parentid != 0
+ else
gt = me.ssavaluetypes
for j = 1:length(gt)
- gt[j] = gtj = cycle_fix_limited(gt[j], me)
- if gtj isa LimitedAccuracy && me.parentid != 0
+ gt[j] = gtj = cycle_fix_limited(gt[j], me, cycleid)
+ if gtj isa LimitedAccuracy
+ @assert me.parentid != 0
limited_src = true
break
end
end
end
result = me.result
- result.valid_worlds = me.world.valid_worlds
result.result = bestguess
ipo_effects = result.ipo_effects = me.ipo_effects = adjust_effects(me)
+ result.valid_worlds = me.world.valid_worlds
result.exc_result = me.exc_bestguess = refine_exception_type(me.exc_bestguess, ipo_effects)
me.src.rettype = widenconst(ignorelimited(bestguess))
me.src.ssaflags = me.ssaflags
@@ -456,17 +498,17 @@ function finishinfer!(me::InferenceState, interp::AbstractInterpreter)
istoplevel = !(me.linfo.def isa Method)
istoplevel || compute_edges!(me) # don't add backedges to toplevel method instance
- if limited_ret
- # a parent may be cached still, but not this intermediate work:
- # we can throw everything else away now
+ if limited_ret || limited_src
+ # A parent may be cached still, but not this intermediate work:
+ # we can throw everything else away now. Caching anything can confuse later
+ # heuristics to consider it worth trying to pursue compiling this further and
+ # finding infinite work as a result. Avoiding caching helps to ensure there is only
+ # a finite amount of work that can be discovered later (although potentially still a
+ # large multiplier on it).
result.src = nothing
+ result.tombstone = true
me.cache_mode = CACHE_MODE_NULL
set_inlineable!(me.src, false)
- elseif limited_src
- # a type result will be cached still, but not this intermediate work:
- # we can throw everything else away now
- result.src = nothing
- set_inlineable!(me.src, false)
else
# annotate fulltree with type information,
# either because we are the outermost code, or we might use this later
@@ -700,7 +742,7 @@ function merge_call_chain!(::AbstractInterpreter, parent::InferenceState, child:
add_cycle_backedge!(parent, child)
parent.cycleid === ancestorid && break
child = parent
- parent = frame_parent(child)::InferenceState
+ parent = cycle_parent(child)::InferenceState
end
# ensure that walking the callstack has the same cycleid (DAG)
for frameid = reverse(ancestorid:length(frames))
@@ -736,7 +778,7 @@ end
# returned instead.
function resolve_call_cycle!(interp::AbstractInterpreter, mi::MethodInstance, parent::AbsIntState)
# TODO (#48913) implement a proper recursion handling for irinterp:
- # This works currently just because the irinterp code doesn't get used much with
+ # This currently works most of the time just because the irinterp code doesn't get used much with
# `@assume_effects`, so it never sees a cycle normally, but that may not be a sustainable solution.
parent isa InferenceState || return false
frames = parent.callstack::Vector{AbsIntState}
@@ -748,7 +790,7 @@ function resolve_call_cycle!(interp::AbstractInterpreter, mi::MethodInstance, pa
if is_same_frame(interp, mi, frame)
if uncached
# our attempt to speculate into a constant call lead to an undesired self-cycle
- # that cannot be converged: poison our call-stack (up to the discovered duplicate frame)
+ # that cannot be converged: if necessary, poison our call-stack (up to the discovered duplicate frame)
# with the limited flag and abort (set return type to Any) now
poison_callstack!(parent, frame)
return true
@@ -767,9 +809,10 @@ function return_cached_result(interp::AbstractInterpreter, method::Method, codei
rt = cached_return_type(codeinst)
exct = codeinst.exctype
effects = ipo_effects(codeinst)
- edge = codeinst
update_valid_age!(caller, WorldRange(min_world(codeinst), max_world(codeinst)))
- return Future(MethodCallResult(interp, caller, method, rt, exct, effects, edge, edgecycle, edgelimited))
+ caller.time_caches += reinterpret(Float16, codeinst.time_infer_total)
+ caller.time_caches += reinterpret(Float16, codeinst.time_infer_cache_saved)
+ return Future(MethodCallResult(interp, caller, method, rt, exct, effects, codeinst, edgecycle, edgelimited))
end
function MethodCallResult(::AbstractInterpreter, sv::AbsIntState, method::Method,
@@ -851,7 +894,7 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize
end
end
end
- if ccall(:jl_get_module_infer, Cint, (Any,), method.module) == 0
+ if !InferenceParams(interp).force_enable_inference && ccall(:jl_get_module_infer, Cint, (Any,), method.module) == 0
add_remark!(interp, caller, "[typeinf_edge] Inference is disabled for the target module")
return Future(MethodCallResult(interp, caller, method, Any, Any, Effects(), nothing, edgecycle, edgelimited))
end
@@ -865,7 +908,9 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize
if frame === false
# completely new, but check again after reserving in the engine
if cache_mode == CACHE_MODE_GLOBAL
+ reserve_start = _time_ns() # subtract engine_reserve (thread-synchronization) time from callers to avoid double-counting
ci_from_engine = engine_reserve(interp, mi)
+ caller.time_paused += (_time_ns() - reserve_start)
edge_ci = ci_from_engine
codeinst = get(code_cache(interp), mi, nothing)
if codeinst isa CodeInstance # return existing rettype if the code is already inferred
@@ -904,8 +949,13 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize
update_valid_age!(caller, frame.world.valid_worlds)
local isinferred = is_inferred(frame)
local edge = isinferred ? edge_ci : nothing
- local effects = isinferred ? frame.result.ipo_effects : # effects are adjusted already within `finish` for ipo_effects
- adjust_effects(effects_for_cycle(frame.ipo_effects), method)
+ local effects, valid_worlds
+ if isinferred
+ effects = frame.result.ipo_effects # effects are adjusted already within `finish` for ipo_effects
+ else
+ (effects, valid_worlds) = adjust_effects(effects_for_cycle(frame.ipo_effects), method, frame.world.this)
+ update_valid_age!(caller, valid_worlds)
+ end
local bestguess = frame.bestguess
local exc_bestguess = refine_exception_type(frame.exc_bestguess, effects)
# propagate newly inferred source to the inliner, allowing efficient inlining w/o deserialization:
@@ -928,7 +978,8 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize
# return the current knowledge about this cycle
frame = frame::InferenceState
update_valid_age!(caller, frame.world.valid_worlds)
- effects = adjust_effects(effects_for_cycle(frame.ipo_effects), method)
+ (effects, valid_worlds) = adjust_effects(effects_for_cycle(frame.ipo_effects), method, frame.world.this)
+ update_valid_age!(caller, valid_worlds)
bestguess = frame.bestguess
exc_bestguess = refine_exception_type(frame.exc_bestguess, effects)
return Future(MethodCallResult(interp, caller, method, bestguess, exc_bestguess, effects, nothing, edgecycle, edgelimited))
@@ -1060,10 +1111,10 @@ end
"""
SOURCE_MODE_NOT_REQUIRED
-Indicates to inference that the source is not required and the only fields
-of the resulting `CodeInstance` that the caller is interested in are types
-and effects. Inference is still free to create a CodeInstance with source,
-but is not required to do so.
+Indicates to inference that the source is not required and the only fields of
+the resulting `CodeInstance` that the caller is interested in are return or
+exception types and IPO effects. Inference is still free to create source for
+it or even add it to the JIT, but is not required or expected to do so.
"""
const SOURCE_MODE_NOT_REQUIRED = 0x0
@@ -1071,28 +1122,51 @@ const SOURCE_MODE_NOT_REQUIRED = 0x0
SOURCE_MODE_ABI
Indicates to inference that it should return a CodeInstance that can
-either be `->invoke`'d (because it has already been compiled or because
-it has constabi) or one that can be made so by compiling its `->inferred`
-field.
-
-N.B.: The `->inferred` field is volatile and the compiler may delete it.
+be `->invoke`'d (because it has already been compiled).
"""
const SOURCE_MODE_ABI = 0x1
"""
- ci_has_abi(code::CodeInstance)
+ SOURCE_MODE_GET_SOURCE
+
+Indicates to inference that it should return a CodeInstance after it has
+prepared interp to be able to provide source code for it.
+"""
+const SOURCE_MODE_GET_SOURCE = 0xf
+
+"""
+ ci_has_abi(interp::AbstractInterpreter, code::CodeInstance)
-Determine whether this CodeInstance is something that could be invoked if we gave it
-to the runtime system (either because it already has an ->invoke ptr, or
-because it has source that could be compiled). Note that this information may
-be stale by the time the user see it, so the user will need to perform their
-own checks if they actually need the abi from it.
+Determine whether this CodeInstance is something that could be invoked if
+interp gave it to the runtime system (either because it already has an ->invoke
+ptr, or because interp has source that could be compiled).
"""
-function ci_has_abi(code::CodeInstance)
+function ci_has_abi(interp::AbstractInterpreter, code::CodeInstance)
(@atomic :acquire code.invoke) !== C_NULL && return true
+ return ci_has_source(interp, code)
+end
+
+"""
+ ci_has_source(interp::AbstractInterpreter, code::CodeInstance)
+
+Determine whether this CodeInstance is something that could be compiled from
+source that interp has.
+"""
+function ci_has_source(interp::AbstractInterpreter, code::CodeInstance)
+ codegen = codegen_cache(interp)
+ codegen === nothing && return false
+ use_const_api(code) && return true
+ haskey(codegen, code) && return true
inf = @atomic :monotonic code.inferred
- if code.owner === nothing ? (isa(inf, CodeInfo) || isa(inf, String)) : inf !== nothing
- # interp.codegen[code] = maybe_uncompress(code, inf) # TODO: the correct way to ensure this information doesn't become stale would be to push it into the stable codegen cache
+ if isa(inf, String)
+ inf = _uncompressed_ir(code, inf)
+ end
+ if code.owner === nothing
+ if isa(inf, CodeInfo)
+ codegen[code] = inf
+ return true
+ end
+ elseif inf !== nothing
return true
end
return false
@@ -1102,9 +1176,10 @@ function ci_has_invoke(code::CodeInstance)
return (@atomic :monotonic code.invoke) !== C_NULL
end
-function ci_meets_requirement(code::CodeInstance, source_mode::UInt8)
+function ci_meets_requirement(interp::AbstractInterpreter, code::CodeInstance, source_mode::UInt8)
source_mode == SOURCE_MODE_NOT_REQUIRED && return true
- source_mode == SOURCE_MODE_ABI && return ci_has_abi(code)
+ source_mode == SOURCE_MODE_ABI && return ci_has_abi(interp, code)
+ source_mode == SOURCE_MODE_GET_SOURCE && return ci_has_source(interp, code)
return false
end
@@ -1114,7 +1189,7 @@ function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance, source_mod
let code = get(code_cache(interp), mi, nothing)
if code isa CodeInstance
# see if this code already exists in the cache
- if ci_meets_requirement(code, source_mode)
+ if ci_meets_requirement(interp, code, source_mode)
ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
return code
end
@@ -1126,22 +1201,24 @@ function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance, source_mod
let code = get(code_cache(interp), mi, nothing)
if code isa CodeInstance
# see if this code already exists in the cache
- if ci_meets_requirement(code, source_mode)
+ if ci_meets_requirement(interp, code, source_mode)
engine_reject(interp, ci)
ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
return code
end
end
end
- if isa(def, Method) && ccall(:jl_get_module_infer, Cint, (Any,), def.module) == 0
- src = retrieve_code_info(mi, get_inference_world(interp))
- if src isa CodeInfo
- finish!(interp, mi, ci, src)
- else
- engine_reject(interp, ci)
+ if !InferenceParams(interp).force_enable_inference
+ if isa(def, Method) && ccall(:jl_get_module_infer, Cint, (Any,), def.module) == 0
+ src = retrieve_code_info(mi, get_inference_world(interp))
+ if src isa CodeInfo
+ finish!(interp, mi, ci, src)
+ else
+ engine_reject(interp, ci)
+ end
+ ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
+ return ci
end
- ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
- return ci
end
result = InferenceResult(mi, typeinf_lattice(interp))
result.ci = ci
@@ -1155,15 +1232,11 @@ function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance, source_mod
ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
ci = result.ci # reload from result in case it changed
+ codegen = codegen_cache(interp)
@assert frame.cache_mode != CACHE_MODE_NULL
- @assert is_result_constabi_eligible(result) || (!isdefined(interp, :codegen) || haskey(interp.codegen, ci))
+ @assert is_result_constabi_eligible(result) || codegen === nothing || haskey(codegen, ci)
@assert is_result_constabi_eligible(result) == use_const_api(ci)
@assert isdefined(ci, :inferred) "interpreter did not fulfill our expectations"
- if !is_cached(frame) && source_mode == SOURCE_MODE_ABI
- # XXX: jl_type_infer somewhat ambiguously assumes this must be cached
- # XXX: this should be using the CI from the cache, if possible instead: haskey(cache, mi) && (ci = cache[mi])
- code_cache(interp)[mi] = ci
- end
return ci
end
@@ -1177,160 +1250,281 @@ end
typeinf_type(interp::AbstractInterpreter, match::MethodMatch) =
typeinf_type(interp, specialize_method(match))
function typeinf_type(interp::AbstractInterpreter, mi::MethodInstance)
- # n.b.: this could be replaced with @something(typeinf_ext(interp, mi, SOURCE_MODE_NOT_REQUIRED), return nothing).rettype
- start_time = ccall(:jl_typeinf_timing_begin, UInt64, ())
- let code = get(code_cache(interp), mi, nothing)
- if code isa CodeInstance
- # see if this rettype already exists in the cache
- ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
- return code.rettype
- end
- end
- ci = engine_reserve(interp, mi)
- let code = get(code_cache(interp), mi, nothing)
- if code isa CodeInstance
- engine_reject(interp, ci)
- # see if this rettype already exists in the cache
- ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
- return code.rettype
- end
- end
- result = InferenceResult(mi, typeinf_lattice(interp))
- result.ci = ci
- frame = InferenceState(result, #=cache_mode=#:global, interp)
- if frame === nothing
- engine_reject(interp, ci)
+ ci = typeinf_ext(interp, mi, SOURCE_MODE_NOT_REQUIRED)
+ ci isa CodeInstance || return nothing
+ return ci.rettype
+end
+
+# Resolve a call, as described by `argtype` to a single matching
+# Method and return a compilable MethodInstance for the call, if
+# it will be runtime-dispatched to exactly that MethodInstance
+function compileable_specialization_for_call(interp::AbstractInterpreter, @nospecialize(argtype))
+ mt = ccall(:jl_method_table_for, Any, (Any,), argtype)
+ if mt === nothing
+ # this would require scanning all method tables, so give up instead
return nothing
end
- typeinf(interp, frame)
- ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
- is_inferred(result) || return nothing
- return widenconst(ignorelimited(result.result))
+
+ matches = findall(argtype, method_table(interp); limit = 1)
+ matches === nothing && return nothing
+ length(matches.matches) == 0 && return nothing
+ match = only(matches.matches)
+
+ compileable_atype = get_compileable_sig(match.method, match.spec_types, match.sparams)
+ compileable_atype === nothing && return nothing
+ if match.spec_types !== compileable_atype
+ sp_ = ccall(:jl_type_intersection_with_env, Any, (Any, Any), compileable_atype, match.method.sig)::SimpleVector
+ sparams = sp_[2]::SimpleVector
+ mi = specialize_method(match.method, compileable_atype, sparams)
+ else
+ mi = specialize_method(match.method, compileable_atype, match.sparams)
+ end
+
+ return mi
end
+const QueueItems = Union{CodeInstance,MethodInstance,SimpleVector}
+
+struct CompilationQueue
+ tocompile::Vector{QueueItems}
+ inspected::IdSet{QueueItems}
+ interp::Union{AbstractInterpreter,Nothing}
+
+ CompilationQueue(;
+ interp::Union{AbstractInterpreter,Nothing}
+ ) = new(QueueItems[], IdSet{QueueItems}(), interp)
+
+ CompilationQueue(queue::CompilationQueue;
+ interp::Union{AbstractInterpreter,Nothing}
+ ) = new(empty!(queue.tocompile), empty!(queue.inspected), interp)
+end
+
+Base.push!(queue::CompilationQueue, item) = push!(queue.tocompile, item)
+Base.append!(queue::CompilationQueue, items) = append!(queue.tocompile, items)
+Base.pop!(queue::CompilationQueue) = pop!(queue.tocompile)
+Base.empty!(queue::CompilationQueue) = (empty!(queue.tocompile); empty!(queue.inspected))
+markinspected!(queue::CompilationQueue, item) = push!(queue.inspected, item)
+isinspected(queue::CompilationQueue, item) = item in queue.inspected
+Base.isempty(queue::CompilationQueue) = isempty(queue.tocompile)
+
# collect a list of all code that is needed along with CodeInstance to codegen it fully
-function collectinvokes!(wq::Vector{CodeInstance}, ci::CodeInfo)
+function collectinvokes!(workqueue::CompilationQueue, ci::CodeInfo, sptypes::Vector{VarState};
+ invokelatest_queue::Union{CompilationQueue,Nothing} = nothing)
src = ci.code
for i = 1:length(src)
stmt = src[i]
isexpr(stmt, :(=)) && (stmt = stmt.args[2])
if isexpr(stmt, :invoke) || isexpr(stmt, :invoke_modify)
edge = stmt.args[1]
- edge isa CodeInstance && isdefined(edge, :inferred) && push!(wq, edge)
+ edge isa CodeInstance && isdefined(edge, :inferred) && push!(workqueue, edge)
+ end
+
+ invokelatest_queue === nothing && continue
+ if isexpr(stmt, :call)
+ farg = stmt.args[1]
+ !applicable(argextype, farg, ci, sptypes) && continue # TODO: Why is this failing during bootstrap
+ ftyp = widenconst(argextype(farg, ci, sptypes))
+
+ if ftyp === typeof(Core.finalizer) && length(stmt.args) == 3
+ finalizer = argextype(stmt.args[2], ci, sptypes)
+ obj = argextype(stmt.args[3], ci, sptypes)
+ atype = argtypes_to_type(Any[finalizer, obj])
+ else
+ # No dynamic dispatch to resolve / enqueue
+ continue
+ end
+
+ let workqueue = invokelatest_queue
+ # make a best-effort attempt to enqueue the relevant code for the finalizer
+ mi = compileable_specialization_for_call(workqueue.interp, atype)
+ mi === nothing && continue
+
+ push!(workqueue, mi)
+ end
end
# TODO: handle other StmtInfo like @cfunction and OpaqueClosure?
end
end
-# This is a bridge for the C code calling `jl_typeinf_func()` on a single Method match
-function typeinf_ext_toplevel(mi::MethodInstance, world::UInt, source_mode::UInt8)
- interp = NativeInterpreter(world)
- ci = typeinf_ext(interp, mi, source_mode)
- if source_mode == SOURCE_MODE_ABI && ci isa CodeInstance && !ci_has_invoke(ci)
- inspected = IdSet{CodeInstance}()
- tocompile = Vector{CodeInstance}()
- push!(tocompile, ci)
- while !isempty(tocompile)
- # ci_has_real_invoke(ci) && return ci # optimization: cease looping if ci happens to get compiled (not just jl_fptr_wait_for_compiled, but fully jl_is_compiled_codeinst)
- callee = pop!(tocompile)
- ci_has_invoke(callee) && continue
- callee in inspected && continue
- src = get(interp.codegen, callee, nothing)
+function add_codeinsts_to_jit!(interp::AbstractInterpreter, ci, source_mode::UInt8)
+ source_mode == SOURCE_MODE_ABI || return ci
+ ci isa CodeInstance && !ci_has_invoke(ci) || return ci
+ codegen = codegen_cache(interp)
+ codegen === nothing && return ci
+ workqueue = CompilationQueue(; interp)
+ push!(workqueue, ci)
+ while !isempty(workqueue)
+ # ci_has_real_invoke(ci) && return ci # optimization: cease looping if ci happens to get compiled (not just jl_fptr_wait_for_compiled, but fully jl_is_compiled_codeinst)
+ callee = pop!(workqueue)
+ ci_has_invoke(callee) && continue
+ isinspected(workqueue, callee) && continue
+ src = get(codegen, callee, nothing)
+ if !isa(src, CodeInfo)
+ src = @atomic :monotonic callee.inferred
+ if isa(src, String)
+ src = _uncompressed_ir(callee, src)
+ end
if !isa(src, CodeInfo)
- src = @atomic :monotonic callee.inferred
- if isa(src, String)
- src = _uncompressed_ir(callee, src)
+ newcallee = typeinf_ext(workqueue.interp, callee.def, source_mode) # always SOURCE_MODE_ABI
+ if newcallee isa CodeInstance
+ callee === ci && (ci = newcallee) # ci stopped meeting the requirements after typeinf_ext last checked, try again with newcallee
+ push!(workqueue, newcallee)
end
- if !isa(src, CodeInfo)
- newcallee = typeinf_ext(interp, callee.def, source_mode)
- if newcallee isa CodeInstance
- callee === ci && (ci = newcallee) # ci stopped meeting the requirements after typeinf_ext last checked, try again with newcallee
- push!(tocompile, newcallee)
- #else
- # println("warning: could not get source code for ", callee.def)
- end
- continue
+ if newcallee !== callee
+ markinspected!(workqueue, callee)
end
+ continue
end
- push!(inspected, callee)
- collectinvokes!(tocompile, src)
- ccall(:jl_add_codeinst_to_jit, Cvoid, (Any, Any), callee, src)
end
+ markinspected!(workqueue, callee)
+ mi = get_ci_mi(callee)
+ sptypes = sptypes_from_meth_instance(mi)
+ collectinvokes!(workqueue, src, sptypes)
+ if iszero(ccall(:jl_mi_cache_has_ci, Cint, (Any, Any), mi, callee))
+ cached = ccall(:jl_get_ci_equiv, Any, (Any, UInt), callee, get_inference_world(workqueue.interp))::CodeInstance
+ if cached === callee
+ # make sure callee is gc-rooted and cached, as required by jl_add_codeinst_to_jit
+ code_cache(workqueue.interp)[mi] = callee
+ else
+ # use an existing CI from the cache, if a compatible one is available
+ callee === ci && (ci = cached)
+ callee = cached
+ end
+ end
+ ccall(:jl_add_codeinst_to_jit, Cvoid, (Any, Any), callee, src)
end
return ci
end
-# This is a bridge for the C code calling `jl_typeinf_func()` on set of Method matches
-function typeinf_ext_toplevel(methods::Vector{Any}, worlds::Vector{UInt}, trim::Bool)
- inspected = IdSet{CodeInstance}()
- tocompile = Vector{CodeInstance}()
- codeinfos = []
- # first compute the ABIs of everything
- for this_world in reverse(sort!(worlds))
- interp = NativeInterpreter(this_world)
- for i = 1:length(methods)
- # each item in this list is either a MethodInstance indicating something
- # to compile, or an svec(rettype, sig) describing a C-callable alias to create.
- item = methods[i]
- if item isa MethodInstance
- # if this method is generally visible to the current compilation world,
- # and this is either the primary world, or not applicable in the primary world
- # then we want to compile and emit this
- if item.def.primary_world <= this_world <= item.def.deleted_world
- ci = typeinf_ext(interp, item, SOURCE_MODE_NOT_REQUIRED)
- ci isa CodeInstance && !use_const_api(ci) && push!(tocompile, ci)
- end
- elseif item isa SimpleVector
- (rt::Type, sig::Type) = item
- # make a best-effort attempt to enqueue the relevant code for the ccallable
- ptr = ccall(:jl_get_specialization1,
- #= MethodInstance =# Ptr{Cvoid}, (Any, Csize_t, Cint),
- sig, this_world, #= mt_cache =# 0)
- if ptr !== C_NULL
- mi = unsafe_pointer_to_objref(ptr)
- ci = typeinf_ext(interp, mi, SOURCE_MODE_NOT_REQUIRED)
- ci isa CodeInstance && !use_const_api(ci) && push!(tocompile, ci)
- end
- # additionally enqueue the ccallable entrypoint / adapter, which implicitly
- # invokes the above ci
- push!(codeinfos, rt)
- push!(codeinfos, sig)
+function typeinf_ext_toplevel(interp::AbstractInterpreter, mi::MethodInstance, source_mode::UInt8)
+ ci = typeinf_ext(interp, mi, source_mode)
+ ci = add_codeinsts_to_jit!(interp, ci, source_mode)
+ return ci
+end
+
+# This is a bridge for the C code calling `jl_typeinf_func()` on a single Method match
+function typeinf_ext_toplevel(mi::MethodInstance, world::UInt, source_mode::UInt8)
+ interp = NativeInterpreter(world)
+ return typeinf_ext_toplevel(interp, mi, source_mode)
+end
+
+function compile!(codeinfos::Vector{Any}, workqueue::CompilationQueue;
+ invokelatest_queue::Union{CompilationQueue,Nothing} = nothing,
+)
+ interp = workqueue.interp
+ world = get_inference_world(interp)
+ while !isempty(workqueue)
+ item = pop!(workqueue)
+ # each item in this list is either a MethodInstance indicating something
+ # to compile, or an svec(rettype, sig) describing a C-callable alias to create.
+ if item isa MethodInstance
+ isinspected(workqueue, item) && continue
+ # if this method is generally visible to the current compilation world,
+ # and this is either the primary world, or not applicable in the primary world
+ # then we want to compile and emit this
+ if item.def.primary_world <= world
+ ci = typeinf_ext(interp, item, SOURCE_MODE_GET_SOURCE)
+ ci isa CodeInstance && push!(workqueue, ci)
end
- end
- while !isempty(tocompile)
- callee = pop!(tocompile)
- callee in inspected && continue
- push!(inspected, callee)
- # now make sure everything has source code, if desired
+ markinspected!(workqueue, item)
+ elseif item isa SimpleVector
+ invokelatest_queue === nothing && continue
+ (rt::Type, sig::Type) = item
+ # make a best-effort attempt to enqueue the relevant code for the ccallable
+ ptr = ccall(:jl_get_specialization1,
+ #= MethodInstance =# Ptr{Cvoid}, (Any, Csize_t, Cint),
+ sig, world, #= mt_cache =# 0)
+ if ptr !== C_NULL
+ mi = unsafe_pointer_to_objref(ptr)::MethodInstance
+ ci = typeinf_ext(interp, mi, SOURCE_MODE_GET_SOURCE)
+ ci isa CodeInstance && push!(invokelatest_queue, ci)
+ end
+ # additionally enqueue the ccallable entrypoint / adapter, which implicitly
+ # invokes the above ci
+ push!(codeinfos, item)
+ elseif item isa CodeInstance
+ callee = item
+ isinspected(workqueue, callee) && continue
mi = get_ci_mi(callee)
- def = mi.def
+ # now make sure everything has source code, if desired
if use_const_api(callee)
- src = codeinfo_for_const(interp, mi, code.rettype_const)
- elseif haskey(interp.codegen, callee)
- src = interp.codegen[callee]
- elseif isa(def, Method) && ccall(:jl_get_module_infer, Cint, (Any,), def.module) == 0 && !trim
- src = retrieve_code_info(mi, get_inference_world(interp))
+ src = codeinfo_for_const(interp, mi, callee.rettype_const)
else
- # TODO: typeinf_code could return something with different edges/ages/owner/abi (needing an update to callee), which we don't handle here
- src = typeinf_code(interp, mi, true)
+ src = get(interp.codegen, callee, nothing)
+ if src === nothing
+ newcallee = typeinf_ext(interp, mi, SOURCE_MODE_GET_SOURCE)
+ if newcallee isa CodeInstance
+ @assert use_const_api(newcallee) || haskey(interp.codegen, newcallee)
+ push!(workqueue, newcallee)
+ end
+ if newcallee !== callee
+ markinspected!(workqueue, callee)
+ end
+ continue
+ end
end
+ markinspected!(workqueue, callee)
if src isa CodeInfo
- collectinvokes!(tocompile, src)
- # It is somewhat ambiguous if typeinf_ext might have callee in the caches,
- # but for the purpose of native compile, we always want them put there.
+ sptypes = sptypes_from_meth_instance(mi)
+ collectinvokes!(workqueue, src, sptypes; invokelatest_queue)
+ # try to reuse an existing CodeInstance from before to avoid making duplicates in the cache
if iszero(ccall(:jl_mi_cache_has_ci, Cint, (Any, Any), mi, callee))
- code_cache(interp)[mi] = callee
+ cached = ccall(:jl_get_ci_equiv, Any, (Any, UInt), callee, world)::CodeInstance
+ if cached === callee
+ code_cache(interp)[mi] = callee
+ else
+ # Use an existing CI from the cache, if a compatible one is available
+ callee = cached
+ end
end
push!(codeinfos, callee)
push!(codeinfos, src)
- elseif trim
- println("warning: failed to get code for ", mi)
end
- end
+ else @assert false "unexpected item in queue" end
+ end
+ return codeinfos
+end
+
+# This is a bridge for the C code calling `jl_typeinf_func()` on set of Method matches
+# The trim_mode can be any of:
+const TRIM_NO = 0
+const TRIM_SAFE = 1
+const TRIM_UNSAFE = 2
+const TRIM_UNSAFE_WARN = 3
+function typeinf_ext_toplevel(methods::Vector{Any}, worlds::Vector{UInt}, trim_mode::Int)
+ inf_params = InferenceParams(; force_enable_inference = trim_mode != TRIM_NO)
+
+ # Create an "invokelatest" queue to enable eager compilation of speculative
+ # invokelatest calls such as from `Core.finalizer` and `ccallable`
+ invokelatest_queue = CompilationQueue(;
+ interp = NativeInterpreter(get_world_counter(); inf_params)
+ )
+
+ codeinfos = []
+ workqueue = CompilationQueue(; interp = nothing)
+ for this_world in reverse!(sort!(worlds))
+ workqueue = CompilationQueue(workqueue;
+ interp = NativeInterpreter(this_world; inf_params)
+ )
+
+ append!(workqueue, methods)
+ compile!(codeinfos, workqueue; invokelatest_queue)
+ end
+
+ if invokelatest_queue !== nothing
+ # This queue is intentionally aliased, to handle e.g. a `finalizer` calling `Core.finalizer`
+ # (it will enqueue into itself and immediately drain)
+ compile!(codeinfos, invokelatest_queue; invokelatest_queue)
+ end
+
+ if trim_mode != TRIM_NO && trim_mode != TRIM_UNSAFE
+ verify_typeinf_trim(codeinfos, trim_mode == TRIM_UNSAFE_WARN)
end
return codeinfos
end
+verify_typeinf_trim(codeinfos::Vector{Any}, onlywarn::Bool) = invokelatest(verify_typeinf_trim, stdout, codeinfos, onlywarn)
+
function return_type(@nospecialize(f), t::DataType) # this method has a special tfunc
world = tls_world_age()
args = Any[_return_type, NativeInterpreter(world), Tuple{Core.Typeof(f), t.parameters...}]
diff --git a/Compiler/src/types.jl b/Compiler/src/types.jl
index 6ffb5402682f3..a04c9e70174fe 100644
--- a/Compiler/src/types.jl
+++ b/Compiler/src/types.jl
@@ -23,6 +23,10 @@ the following methods to satisfy the `AbstractInterpreter` API requirement:
- `get_inference_world(interp::NewInterpreter)` - return the world age for this interpreter
- `get_inference_cache(interp::NewInterpreter)` - return the local inference cache
- `cache_owner(interp::NewInterpreter)` - return the owner of any new cache entries
+
+If `CodeInstance`s compiled using `interp::NewInterpreter` are meant to be executed with `invoke`,
+a method `codegen_cache(interp::NewInterpreter) -> IdDict{CodeInstance, CodeInfo}` must be defined,
+and inference must be triggered via `typeinf_ext_toplevel` with source mode `SOURCE_MODE_ABI`.
"""
abstract type AbstractInterpreter end
@@ -106,6 +110,7 @@ mutable struct InferenceResult
effects::Effects # if optimization is finished
analysis_results::AnalysisResults # AnalysisResults with e.g. result::ArgEscapeCache if optimized, otherwise NULL_ANALYSIS_RESULTS
is_src_volatile::Bool # `src` has been cached globally as the compressed format already, allowing `src` to be used destructively
+ tombstone::Bool
#=== uninitialized fields ===#
ci::CodeInstance # CodeInstance if this result may be added to the cache
@@ -116,7 +121,7 @@ mutable struct InferenceResult
ipo_effects = effects = Effects()
analysis_results = NULL_ANALYSIS_RESULTS
return new(mi, argtypes, overridden_by_const, result, exc_result, src,
- valid_worlds, ipo_effects, effects, analysis_results, #=is_src_volatile=#false)
+ valid_worlds, ipo_effects, effects, analysis_results, #=is_src_volatile=#false, false)
end
end
function InferenceResult(mi::MethodInstance, 𝕃::AbstractLattice=fallback_lattice)
@@ -186,6 +191,10 @@ Parameters that control abstract interpretation-based type inference operation.
it will `throw`). Defaults to `false` since this assumption does not hold in Julia's
semantics for native code execution.
---
+- `inf_params.force_enable_inference::Bool = false`\\
+ If `true`, inference will be performed on functions regardless of whether it was disabled
+ at the module level via `Base.Experimental.@compiler_options`.
+---
"""
struct InferenceParams
max_methods::Int
@@ -197,6 +206,7 @@ struct InferenceParams
aggressive_constant_propagation::Bool
assume_bindings_static::Bool
ignore_recursion_hardlimit::Bool
+ force_enable_inference::Bool
function InferenceParams(
max_methods::Int,
@@ -207,7 +217,9 @@ struct InferenceParams
ipo_constant_propagation::Bool,
aggressive_constant_propagation::Bool,
assume_bindings_static::Bool,
- ignore_recursion_hardlimit::Bool)
+ ignore_recursion_hardlimit::Bool,
+ force_enable_inference::Bool,
+ )
return new(
max_methods,
max_union_splitting,
@@ -217,7 +229,9 @@ struct InferenceParams
ipo_constant_propagation,
aggressive_constant_propagation,
assume_bindings_static,
- ignore_recursion_hardlimit)
+ ignore_recursion_hardlimit,
+ force_enable_inference,
+ )
end
end
function InferenceParams(
@@ -230,7 +244,9 @@ function InferenceParams(
#=ipo_constant_propagation::Bool=# true,
#=aggressive_constant_propagation::Bool=# false,
#=assume_bindings_static::Bool=# false,
- #=ignore_recursion_hardlimit::Bool=# false);
+ #=ignore_recursion_hardlimit::Bool=# false,
+ #=force_enable_inference::Bool=# false
+ );
max_methods::Int = params.max_methods,
max_union_splitting::Int = params.max_union_splitting,
max_apply_union_enum::Int = params.max_apply_union_enum,
@@ -239,7 +255,9 @@ function InferenceParams(
ipo_constant_propagation::Bool = params.ipo_constant_propagation,
aggressive_constant_propagation::Bool = params.aggressive_constant_propagation,
assume_bindings_static::Bool = params.assume_bindings_static,
- ignore_recursion_hardlimit::Bool = params.ignore_recursion_hardlimit)
+ ignore_recursion_hardlimit::Bool = params.ignore_recursion_hardlimit,
+ force_enable_inference::Bool = params.force_enable_inference,
+)
return InferenceParams(
max_methods,
max_union_splitting,
@@ -249,7 +267,9 @@ function InferenceParams(
ipo_constant_propagation,
aggressive_constant_propagation,
assume_bindings_static,
- ignore_recursion_hardlimit)
+ ignore_recursion_hardlimit,
+ force_enable_inference,
+ )
end
"""
@@ -430,6 +450,19 @@ to incorporate customized dispatches for the overridden methods.
method_table(interp::AbstractInterpreter) = InternalMethodTable(get_inference_world(interp))
method_table(interp::NativeInterpreter) = interp.method_table
+"""
+ codegen_cache(interp::AbstractInterpreter) -> Union{Nothing, IdDict{CodeInstance, CodeInfo}}
+
+Optionally return a cache associating a `CodeInfo` with each `CodeInstance` that should be added to the JIT
+for future execution via `invoke(f, ::CodeInstance, args...)`. This cache is used during `typeinf_ext_toplevel`,
+and may be safely discarded between calls to this function.
+
+By default, a value of `nothing` is returned indicating that `CodeInstance`s should not be added to the JIT.
+Attempting to execute them via `invoke` will result in an error.
+"""
+codegen_cache(interp::AbstractInterpreter) = nothing
+codegen_cache(interp::NativeInterpreter) = interp.codegen
+
"""
By default `AbstractInterpreter` implements the following inference bail out logic:
- `bail_out_toplevel_call(::AbstractInterpreter, sig, ::InferenceState)`: bail out from
diff --git a/Compiler/src/typeutils.jl b/Compiler/src/typeutils.jl
index d588a9aee1a6c..50b3dc6b0c6f5 100644
--- a/Compiler/src/typeutils.jl
+++ b/Compiler/src/typeutils.jl
@@ -36,14 +36,8 @@ function isTypeDataType(@nospecialize t)
isType(t) && return false
# Could be Union{} at runtime
t === Core.TypeofBottom && return false
- if t.name === Tuple.name
- # If we have a Union parameter, could have been redistributed at runtime,
- # e.g. `Tuple{Union{Int, Float64}, Int}` is a DataType, but
- # `Union{Tuple{Int, Int}, Tuple{Float64, Int}}` is typeequal to it and
- # is not.
- return all(isTypeDataType, t.parameters)
- end
- return true
+    # Tuple types are covariant, so return true only if `t` is not a Tuple
+ return t.name !== Tuple.name
end
has_extended_info(@nospecialize x) = (!isa(x, Type) && !isvarargtype(x)) || isType(x)
diff --git a/Compiler/src/utilities.jl b/Compiler/src/utilities.jl
index c322d1062cea1..dfcff03d70f0e 100644
--- a/Compiler/src/utilities.jl
+++ b/Compiler/src/utilities.jl
@@ -129,6 +129,25 @@ function retrieve_code_info(mi::MethodInstance, world::UInt)
else
c = copy(src::CodeInfo)
end
+ if (def.did_scan_source & 0x1) == 0x0
+ # This scan must happen:
+ # 1. After method definition
+ # 2. Before any code instances that may have relied on information
+ # from implicit GlobalRefs for this method are added to the cache
+ # 3. Preferably while the IR is already uncompressed
+    #      4. As late as possible, since adding the backedges early may cause
+    #         spurious invalidations.
+ #
+ # At the moment we do so here, because
+ # 1. It's reasonably late
+ # 2. It has easy access to the uncompressed IR
+ # 3. We necessarily pass through here before relying on any
+ # information obtained from implicit GlobalRefs.
+ #
+ # However, the exact placement of this scan is not as important as
+ # long as the above conditions are met.
+ ccall(:jl_scan_method_source_now, Cvoid, (Any, Any), def, c)
+ end
end
if c isa CodeInfo
c.parent = mi
@@ -310,7 +329,7 @@ end
inlining_enabled() = (JLOptions().can_inline == 1)
-function coverage_enabled(m::Module)
+function instrumentation_enabled(m::Module, only_if_affects_optimizer::Bool)
generating_output() && return false # don't alter caches
cov = JLOptions().code_coverage
if cov == 1 # user
@@ -321,6 +340,17 @@ function coverage_enabled(m::Module)
elseif cov == 2 # all
return true
end
+ if !only_if_affects_optimizer
+ log = JLOptions().malloc_log
+ if log == 1 # user
+ m = moduleroot(m)
+ m === Core && return false
+ isdefined(Main, :Base) && m === Main.Base && return false
+ return true
+ elseif log == 2 # all
+ return true
+ end
+ end
return false
end
@@ -332,3 +362,5 @@ function inbounds_option()
end
is_asserts() = ccall(:jl_is_assertsbuild, Cint, ()) == 1
+
+_time_ns() = ccall(:jl_hrtime, UInt64, ())
diff --git a/Compiler/src/validation.jl b/Compiler/src/validation.jl
index 6700aa8d4508f..d5faf51a89356 100644
--- a/Compiler/src/validation.jl
+++ b/Compiler/src/validation.jl
@@ -22,8 +22,8 @@ const VALID_EXPR_HEADS = IdDict{Symbol,UnitRange{Int}}(
:copyast => 1:1,
:meta => 0:typemax(Int),
:global => 1:1,
- :globaldecl => 2:2,
- :foreigncall => 5:typemax(Int), # name, RT, AT, nreq, (cconv, effects), args..., roots...
+ :globaldecl => 1:2,
+ :foreigncall => 5:typemax(Int), # name, RT, AT, nreq, (cconv, effects, gc_safe), args..., roots...
:cfunction => 5:5,
:isdefined => 1:2,
:code_coverage_effect => 0:0,
@@ -225,7 +225,7 @@ function validate_code!(errors::Vector{InvalidCodeError}, mi::Core.MethodInstanc
mnargs = 0
else
m = mi.def::Method
- mnargs = m.nargs
+ mnargs = Int(m.nargs)
n_sig_params = length((unwrap_unionall(m.sig)::DataType).parameters)
if m.is_for_opaque_closure
m.sig === Tuple || push!(errors, InvalidCodeError(INVALID_SIGNATURE_OPAQUE_CLOSURE, (m.sig, m.isva)))
@@ -234,6 +234,7 @@ function validate_code!(errors::Vector{InvalidCodeError}, mi::Core.MethodInstanc
end
end
if isa(c, CodeInfo)
+ mnargs = Int(c.nargs)
mnargs > length(c.slotnames) && push!(errors, InvalidCodeError(SLOTNAMES_NARGS_MISMATCH))
validate_code!(errors, c, is_top_level)
end
diff --git a/Compiler/src/verifytrim.jl b/Compiler/src/verifytrim.jl
new file mode 100644
index 0000000000000..09a189b2ff223
--- /dev/null
+++ b/Compiler/src/verifytrim.jl
@@ -0,0 +1,383 @@
+# This file is a part of Julia. License is MIT: https://julialang.org/license
+
+import ..Compiler: verify_typeinf_trim, NativeInterpreter, argtypes_to_type, compileable_specialization_for_call
+
+using ..Compiler:
+ # operators
+ !, !=, !==, +, :, <, <=, ==, =>, >, >=, ∈, ∉,
+ # types
+ Array, Builtin, Callable, Cint, CodeInfo, CodeInstance, Csize_t, Exception,
+ GenericMemory, GlobalRef, IdDict, IdSet, IntrinsicFunction, Method, MethodInstance,
+ NamedTuple, Pair, PhiCNode, PhiNode, PiNode, QuoteNode, SSAValue, SimpleVector, String,
+ Tuple, VarState, Vector,
+ # functions
+ argextype, empty!, error, get, get_ci_mi, get_world_counter, getindex, getproperty,
+ hasintersect, haskey, in, isdispatchelem, isempty, isexpr, iterate, length, map!, max,
+ pop!, popfirst!, push!, pushfirst!, reinterpret, reverse!, reverse, setindex!,
+ setproperty!, similar, singleton_type, sptypes_from_meth_instance,
+ unsafe_pointer_to_objref, widenconst, isconcretetype,
+ # misc
+ @nospecialize, @assert, C_NULL
+using ..IRShow: LineInfoNode, print, show, println, append_scopes!, IOContext, IO, normalize_method_name
+using ..Base: Base, sourceinfo_slotnames
+using ..Base.StackTraces: StackFrame
+
+## declarations ##
+
+struct CallMissing <: Exception
+ codeinst::CodeInstance
+ codeinfo::CodeInfo
+ sptypes::Vector{VarState}
+ stmtidx::Int
+ desc::String
+end
+
+struct CCallableMissing <: Exception
+ rt
+ sig
+ desc
+end
+
+const ParentMap = IdDict{CodeInstance,Tuple{CodeInstance,Int}}
+const ErrorList = Vector{Pair{Bool,Any}} # severity => exception
+
+const runtime_functions = Symbol[
+    # a denylist of runtime functions that someone might ccall and that can call jl_apply or access reflection state
+ # which might not be captured by the trim output
+ :jl_apply,
+]
+
+## code for pretty printing ##
+
+# wrap a statement in a typeassert for printing clarity, unless that info seems already obvious
+function mapssavaluetypes(codeinfo::CodeInfo, sptypes::Vector{VarState}, stmt)
+ @nospecialize stmt
+ newstmt = mapssavalues(codeinfo, sptypes, stmt)
+ typ = widenconst(argextype(stmt, codeinfo, sptypes))
+ if newstmt isa Expr
+ if newstmt.head ∈ (:quote, :inert)
+ return newstmt
+ end
+ elseif newstmt isa GlobalRef && isdispatchelem(typ)
+ return newstmt
+ elseif newstmt isa Union{Int, UInt8, UInt16, UInt32, UInt64, Float16, Float32, Float64, String, QuoteNode}
+ return newstmt
+ elseif newstmt isa Callable
+ return newstmt
+ end
+ return Expr(:(::), newstmt, typ)
+end
+
+# map the ssavalues in a (value-producing) statement to the expression they came from, summarizing some things to avoid excess printing
+function mapssavalues(codeinfo::CodeInfo, sptypes::Vector{VarState}, stmt)
+ @nospecialize stmt
+ if stmt isa SSAValue
+ return mapssavalues(codeinfo, sptypes, codeinfo.code[stmt.id])
+ elseif stmt isa PiNode
+ return mapssavalues(codeinfo, sptypes, stmt.val)
+ elseif stmt isa Expr
+ stmt.head ∈ (:quote, :inert) && return stmt
+ newstmt = Expr(stmt.head)
+ if stmt.head === :foreigncall
+ return Expr(:call, :ccall, mapssavalues(codeinfo, sptypes, stmt.args[1]))
+ elseif stmt.head ∉ (:new, :method, :toplevel, :thunk)
+ newstmt.args = map!(similar(stmt.args), stmt.args) do arg
+ @nospecialize arg
+ return mapssavaluetypes(codeinfo, sptypes, arg)
+ end
+ if newstmt.head === :invoke
+ # why is the fancy printing for this not in show_unquoted?
+ popfirst!(newstmt.args)
+ newstmt.head = :call
+ end
+ end
+ return newstmt
+ elseif stmt isa PhiNode
+ return PhiNode()
+ elseif stmt isa PhiCNode
+ return PhiNode()
+ end
+ return stmt
+end
+
+function verify_print_stmt(io::IOContext{IO}, codeinfo::CodeInfo, sptypes::Vector{VarState}, stmtidx::Int)
+ if codeinfo.slotnames !== nothing
+ io = IOContext(io, :SOURCE_SLOTNAMES => sourceinfo_slotnames(codeinfo))
+ end
+ print(io, mapssavaluetypes(codeinfo, sptypes, SSAValue(stmtidx)))
+end
+
+function verify_print_error(io::IOContext{IO}, desc::CallMissing, parents::ParentMap)
+ (; codeinst, codeinfo, sptypes, stmtidx, desc) = desc
+ frames = verify_create_stackframes(codeinst, stmtidx, parents)
+ print(io, desc, " from statement ")
+ verify_print_stmt(io, codeinfo, sptypes, stmtidx)
+ Base.show_backtrace(io, frames)
+ print(io, "\n\n")
+ nothing
+end
+
+function verify_print_error(io::IOContext{IO}, desc::CCallableMissing, parents::ParentMap)
+ print(io, desc.desc, " for ", desc.sig, " => ", desc.rt, "\n\n")
+ nothing
+end
+
+function verify_create_stackframes(codeinst::CodeInstance, stmtidx::Int, parents::ParentMap)
+ scopes = LineInfoNode[]
+ frames = StackFrame[]
+ parent = (codeinst, stmtidx)
+ while parent !== nothing
+ codeinst, stmtidx = parent
+ di = codeinst.debuginfo
+ append_scopes!(scopes, stmtidx, di, :var"unknown scope")
+ for i in reverse(1:length(scopes))
+ lno = scopes[i]
+ inlined = i != 1
+ def = lno.method
+ def isa Union{Method,Core.CodeInstance,MethodInstance} || (def = nothing)
+ sf = StackFrame(normalize_method_name(lno.method), lno.file, lno.line, def, false, inlined, 0)
+ push!(frames, sf)
+ end
+ empty!(scopes)
+ parent = get(parents, codeinst, nothing)
+ end
+ return frames
+end
+
+## code for analysis ##
+
+function may_dispatch(@nospecialize ftyp)
+ if ftyp <: IntrinsicFunction
+ return true
+ elseif ftyp <: Builtin
+ # other builtins (including the IntrinsicFunctions) are good
+ return Core._apply isa ftyp ||
+ Core._apply_iterate isa ftyp ||
+ Core._call_in_world_total isa ftyp ||
+ Core.invoke isa ftyp ||
+ Core.invoke_in_world isa ftyp ||
+ Core.invokelatest isa ftyp ||
+ Core.finalizer isa ftyp ||
+ Core.modifyfield! isa ftyp ||
+ Core.modifyglobal! isa ftyp ||
+ Core.memoryrefmodify! isa ftyp
+ else
+ return true
+ end
+end
+
+function verify_codeinstance!(interp::NativeInterpreter, codeinst::CodeInstance, codeinfo::CodeInfo, inspected::IdSet{CodeInstance}, caches::IdDict{MethodInstance,CodeInstance}, parents::ParentMap, errors::ErrorList)
+ mi = get_ci_mi(codeinst)
+ sptypes = sptypes_from_meth_instance(mi)
+ src = codeinfo.code
+ for i = 1:length(src)
+ stmt = src[i]
+ isexpr(stmt, :(=)) && (stmt = stmt.args[2])
+ error = ""
+ warn = false
+ if isexpr(stmt, :invoke) || isexpr(stmt, :invoke_modify)
+ error = "unresolved invoke"
+ edge = stmt.args[1]
+ if edge isa CodeInstance
+ haskey(parents, edge) || (parents[edge] = (codeinst, i))
+ edge in inspected && continue
+ edge_mi = get_ci_mi(edge)
+ if edge_mi === edge.def
+ ci = get(caches, edge_mi, nothing)
+ ci isa CodeInstance && continue # assume that only this_world matters for trim
+ end
+ end
+ # TODO: check for calls to Base.atexit?
+ elseif isexpr(stmt, :call)
+ error = "unresolved call"
+ farg = stmt.args[1]
+ ftyp = widenconst(argextype(farg, codeinfo, sptypes))
+ if ftyp <: IntrinsicFunction
+ #TODO: detect if f !== Core.Intrinsics.atomic_pointermodify (see statement_cost), otherwise error
+ continue
+ elseif ftyp <: Builtin
+ if !may_dispatch(ftyp)
+ continue
+ end
+ if !isconcretetype(ftyp)
+ error = "unresolved call to (unknown) builtin"
+ elseif Core._apply_iterate isa ftyp
+ if length(stmt.args) >= 3
+ # args[1] is _apply_iterate object
+ # args[2] is invoke object
+ farg = stmt.args[3]
+ ftyp = widenconst(argextype(farg, codeinfo, sptypes))
+ if may_dispatch(ftyp)
+ error = "unresolved call to function"
+ else
+ for i in 4:length(stmt.args)
+ atyp = widenconst(argextype(stmt.args[i], codeinfo, sptypes))
+ if !(atyp <: Union{SimpleVector, GenericMemory, Array, Tuple, NamedTuple})
+ error = "unresolved argument to call"
+ break
+ end
+ end
+ end
+ end
+ elseif Core.finalizer isa ftyp
+ if length(stmt.args) == 3
+ finalizer = argextype(stmt.args[2], codeinfo, sptypes)
+ obj = argextype(stmt.args[3], codeinfo, sptypes)
+ atype = argtypes_to_type(Any[finalizer, obj])
+
+ mi = compileable_specialization_for_call(interp, atype)
+ if mi !== nothing
+ ci = get(caches, mi, nothing)
+ ci isa CodeInstance && continue
+ end
+
+ error = "unresolved finalizer registered"
+ end
+ elseif Core._apply isa ftyp
+ error = "trim verification not yet implemented for builtin `Core._apply`"
+ elseif Core._call_in_world_total isa ftyp
+ error = "trim verification not yet implemented for builtin `Core._call_in_world_total`"
+ elseif Core.invoke isa ftyp
+ error = "trim verification not yet implemented for builtin `Core.invoke`"
+ elseif Core.invoke_in_world isa ftyp
+ error = "trim verification not yet implemented for builtin `Core.invoke_in_world`"
+ elseif Core.invokelatest isa ftyp
+ error = "trim verification not yet implemented for builtin `Core.invokelatest`"
+ elseif Core.modifyfield! isa ftyp
+ error = "trim verification not yet implemented for builtin `Core.modifyfield!`"
+ elseif Core.modifyglobal! isa ftyp
+ error = "trim verification not yet implemented for builtin `Core.modifyglobal!`"
+ elseif Core.memoryrefmodify! isa ftyp
+ error = "trim verification not yet implemented for builtin `Core.memoryrefmodify!`"
+ else @assert false "unexpected builtin" end
+ end
+ extyp = argextype(SSAValue(i), codeinfo, sptypes)
+ if extyp === Union{}
+ warn = true # downgrade must-throw calls to be only a warning
+ end
+ elseif isexpr(stmt, :cfunction)
+ error = "unresolved cfunction"
+ #TODO: parse the cfunction expression to check the target is defined
+ warn = true
+ elseif isexpr(stmt, :foreigncall)
+ foreigncall = stmt.args[1]
+ if foreigncall isa QuoteNode
+ if foreigncall.value in runtime_functions
+ error = "disallowed ccall into a runtime function"
+ end
+ end
+ elseif isexpr(stmt, :new_opaque_closure)
+ error = "unresolved opaque closure"
+ # TODO: check that this opaque closure has a valid signature for possible codegen and code defined for it
+ warn = true
+ end
+ if !isempty(error)
+ push!(errors, warn => CallMissing(codeinst, codeinfo, sptypes, i, error))
+ end
+ end
+end
+
+## entry-point ##
+
+function get_verify_typeinf_trim(codeinfos::Vector{Any})
+ this_world = get_world_counter()
+ interp = NativeInterpreter(this_world)
+ inspected = IdSet{CodeInstance}()
+ caches = IdDict{MethodInstance,CodeInstance}()
+ errors = ErrorList()
+ parents = ParentMap()
+ for i = 1:length(codeinfos)
+ item = codeinfos[i]
+ if item isa CodeInstance
+ push!(inspected, item)
+ if item.owner === nothing && item.min_world <= this_world <= item.max_world
+ mi = get_ci_mi(item)
+ if mi === item.def
+ caches[mi] = item
+ end
+ end
+ end
+ end
+ for i = 1:length(codeinfos)
+ item = codeinfos[i]
+ if item isa CodeInstance
+ src = codeinfos[i + 1]::CodeInfo
+ verify_codeinstance!(interp, item, src, inspected, caches, parents, errors)
+ elseif item isa SimpleVector
+ rt = item[1]::Type
+ sig = item[2]::Type
+ ptr = ccall(:jl_get_specialization1,
+ #= MethodInstance =# Ptr{Cvoid}, (Any, Csize_t, Cint),
+ sig, this_world, #= mt_cache =# 0)
+ asrt = Any
+ valid = if ptr !== C_NULL
+ mi = unsafe_pointer_to_objref(ptr)::MethodInstance
+ ci = get(caches, mi, nothing)
+ if ci isa CodeInstance
+ # TODO: should we find a way to indicate to the user that this gets called via ccallable?
+ # parent[ci] = something
+ asrt = ci.rettype
+ true
+ else
+ false
+ end
+ else
+ false
+ end
+ if !valid
+ warn = false
+ push!(errors, warn => CCallableMissing(rt, sig, "unresolved ccallable"))
+ elseif !(asrt <: rt)
+ warn = hasintersect(asrt, rt)
+ push!(errors, warn => CCallableMissing(asrt, sig, "ccallable declared return type does not match inference"))
+ end
+ end
+ end
+ return (errors, parents)
+end
+
+# It is unclear if this file belongs in Compiler itself, or should instead be a codegen
+# driver / verifier implemented by juliac-buildscript.jl for the purpose of extensibility.
+# For now, it is part of Base.Compiler, but executed with invokelatest so that packages
+# could provide hooks to change, customize, or tweak its behavior and heuristics.
+function verify_typeinf_trim(io::IO, codeinfos::Vector{Any}, onlywarn::Bool)
+ errors, parents = get_verify_typeinf_trim(codeinfos)
+
+ # count up how many messages we printed, of each severity
+ counts = [0, 0] # errors, warnings
+ io = IOContext{IO}(io)
+ # print all errors afterwards, when the parents map is fully constructed
+ for desc in errors
+ warn, desc = desc
+ severity = warn ? 2 : 1
+ no = (counts[severity] += 1)
+ print(io, warn ? "Verifier warning #" : "Verifier error #", no, ": ")
+ # TODO: should we coalesce any of these stacktraces to minimize spew?
+ verify_print_error(io, desc, parents)
+ end
+
+ ## TODO: compute and display the minimum and/or full call graph instead of merely the first parent stacktrace?
+ #for i = 1:length(codeinfos)
+ # item = codeinfos[i]
+ # if item isa CodeInstance
+ # println(item, "::", item.rettype)
+ # end
+ #end
+
+ let severity = 0
+ if counts[1] > 0 || counts[2] > 0
+ print("Trim verify finished with ")
+ print(counts[1], counts[1] == 1 ? " error" : " errors")
+ print(", ")
+ print(counts[2], counts[2] == 1 ? " warning" : " warnings")
+ print(".\n")
+ severity = 2
+ end
+ if counts[1] > 0
+ severity = 1
+ end
+ # messages classified as errors are fatal, warnings are not
+ 0 < severity <= 1 && !onlywarn && throw(Core.TrimFailure())
+ end
+ nothing
+end
diff --git a/Compiler/test/AbstractInterpreter.jl b/Compiler/test/AbstractInterpreter.jl
index 533eaf93937a3..83218d73cad69 100644
--- a/Compiler/test/AbstractInterpreter.jl
+++ b/Compiler/test/AbstractInterpreter.jl
@@ -534,3 +534,17 @@ let interp = DebugInterp()
end
@test found
end
+
+@newinterp InvokeInterp
+struct InvokeOwner end
+codegen = IdDict{CodeInstance, CodeInfo}()
+Compiler.cache_owner(::InvokeInterp) = InvokeOwner()
+Compiler.codegen_cache(::InvokeInterp) = codegen
+let interp = InvokeInterp()
+ source_mode = Compiler.SOURCE_MODE_ABI
+ f = (+)
+ args = (1, 1)
+ mi = @ccall jl_method_lookup(Any[f, args...]::Ptr{Any}, (1+length(args))::Csize_t, Base.tls_world_age()::Csize_t)::Ref{Core.MethodInstance}
+ ci = Compiler.typeinf_ext_toplevel(interp, mi, source_mode)
+ @test invoke(f, ci, args...) == 2
+end
diff --git a/Compiler/test/codegen.jl b/Compiler/test/codegen.jl
index 57a9c26aefac6..8f5a47c5efa62 100644
--- a/Compiler/test/codegen.jl
+++ b/Compiler/test/codegen.jl
@@ -889,57 +889,6 @@ ex54166 = Union{Missing, Int64}[missing -2; missing -2];
dims54166 = (1,2)
@test (minimum(ex54166; dims=dims54166)[1] === missing)
-# #54109 - Excessive LLVM time for egal
-struct DefaultOr54109{T}
- x::T
- default::Bool
-end
-
-@eval struct Torture1_54109
- $((Expr(:(::), Symbol("x$i"), DefaultOr54109{Float64}) for i = 1:897)...)
-end
-Torture1_54109() = Torture1_54109((DefaultOr54109(1.0, false) for i = 1:897)...)
-
-@eval struct Torture2_54109
- $((Expr(:(::), Symbol("x$i"), DefaultOr54109{Float64}) for i = 1:400)...)
- $((Expr(:(::), Symbol("x$(i+400)"), DefaultOr54109{Int16}) for i = 1:400)...)
-end
-Torture2_54109() = Torture2_54109((DefaultOr54109(1.0, false) for i = 1:400)..., (DefaultOr54109(Int16(1), false) for i = 1:400)...)
-
-@noinline egal_any54109(x, @nospecialize(y::Any)) = x === Base.compilerbarrier(:type, y)
-
-let ir1 = get_llvm(egal_any54109, Tuple{Torture1_54109, Any}),
- ir2 = get_llvm(egal_any54109, Tuple{Torture2_54109, Any})
-
- # We can't really do timing on CI, so instead, let's look at the length of
- # the optimized IR. The original version had tens of thousands of lines and
- # was slower, so just check here that we only have < 500 lines. If somebody,
- # implements a better comparison that's larger than that, just re-benchmark
- # this and adjust the threshold.
-
- @test count(==('\n'), ir1) < 500
- @test count(==('\n'), ir2) < 500
-end
-
-## Regression test for egal of a struct of this size without padding, but with
-## non-bitsegal, to make sure that it doesn't accidentally go down the accelerated
-## path.
-@eval struct BigStructAnyInt
- $((Expr(:(::), Symbol("x$i"), Pair{Any, Int}) for i = 1:33)...)
-end
-BigStructAnyInt() = BigStructAnyInt((Union{Base.inferencebarrier(Float64), Int}=>i for i = 1:33)...)
-@test egal_any54109(BigStructAnyInt(), BigStructAnyInt())
-
-## For completeness, also test correctness, since we don't have a lot of
-## large-struct tests.
-
-# The two allocations of the same struct will likely have different padding,
-# we want to make sure we find them egal anyway - a naive memcmp would
-# accidentally look at it.
-@test egal_any54109(Torture1_54109(), Torture1_54109())
-@test egal_any54109(Torture2_54109(), Torture2_54109())
-@test !egal_any54109(Torture1_54109(), Torture1_54109((DefaultOr54109(2.0, false) for i = 1:897)...))
-
bar54599() = Base.inferencebarrier(true) ? (Base.PkgId(Main),1) : nothing
function foo54599()
@@ -1058,3 +1007,32 @@ let
end
nothing
end
+
+# Test that turning an implicit import into an explicit one doesn't pessimize codegen
+module TurnedIntoExplicit
+ using Test
+ import ..get_llvm
+
+ module ReExportBitCast
+ export bitcast
+ import Base: bitcast
+ end
+ using .ReExportBitCast
+
+ f(x::UInt) = bitcast(Float64, x)
+
+ @test !occursin("jl_apply_generic", get_llvm(f, Tuple{UInt}))
+
+ import Base: bitcast
+
+ @test !occursin("jl_apply_generic", get_llvm(f, Tuple{UInt}))
+end
+
+# Test codegen for `isdefinedglobal` of constant (#57872)
+const x57872 = "Hello"
+f57872() = (Core.isdefinedglobal(@__MODULE__, Base.compilerbarrier(:const, :x57872)), x57872) # Extra globalref here to force world age bounds
+@test f57872() == (true, "Hello")
+
+@noinline f_mutateany(@nospecialize x) = x[] = 1
+g_mutateany() = (y = Ref(0); f_mutateany(y); y[])
+@test g_mutateany() === 1
diff --git a/Compiler/test/contextual.jl b/Compiler/test/contextual.jl
index a9c63ab34c0c0..941ce172d41e2 100644
--- a/Compiler/test/contextual.jl
+++ b/Compiler/test/contextual.jl
@@ -1,19 +1,23 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
+module contextual
+
# N.B.: This file is also run from interpreter.jl, so needs to be standalone-executable
using Test
-include("setup_Compiler.jl")
# Cassette
# ========
+# TODO Use CassetteBase.jl instead of this mini-cassette?
+
module MiniCassette
# A minimal demonstration of the cassette mechanism. Doesn't support all the
# fancy features, but sufficient to exercise this code path in the compiler.
+ using Core: SimpleVector
using Core.IR
- using ..Compiler
- using ..Compiler: retrieve_code_info, quoted, anymap
+ using Base: Compiler as CC
+ using .CC: retrieve_code_info, quoted, anymap
using Base.Meta: isexpr
export Ctx, overdub
@@ -21,7 +25,7 @@ module MiniCassette
struct Ctx; end
# A no-op cassette-like transform
- function transform_expr(expr, map_slot_number, map_ssa_value, sparams::Core.SimpleVector)
+ function transform_expr(expr, map_slot_number, map_ssa_value, sparams::SimpleVector)
@nospecialize expr
transform(@nospecialize expr) = transform_expr(expr, map_slot_number, map_ssa_value, sparams)
if isexpr(expr, :call)
@@ -45,11 +49,11 @@ module MiniCassette
end
end
- function transform!(mi::MethodInstance, ci::CodeInfo, nargs::Int, sparams::Core.SimpleVector)
+ function transform!(mi::MethodInstance, ci::CodeInfo, nargs::Int, sparams::SimpleVector)
code = ci.code
- di = Compiler.DebugInfoStream(mi, ci.debuginfo, length(code))
- ci.slotnames = Symbol[Symbol("#self#"), :ctx, :f, :args, ci.slotnames[nargs+1:end]...]
- ci.slotflags = UInt8[(0x00 for i = 1:4)..., ci.slotflags[nargs+1:end]...]
+ di = CC.DebugInfoStream(mi, ci.debuginfo, length(code))
+ ci.slotnames = Symbol[Symbol("#self#"), :ctx, :f, :args, ci.slotnames[nargs+2:end]...]
+ ci.slotflags = UInt8[(0x00 for i = 1:4)..., ci.slotflags[nargs+2:end]...]
# Insert one SSAValue for every argument statement
prepend!(code, Any[Expr(:call, getfield, SlotNumber(4), i) for i = 1:nargs])
prepend!(di.codelocs, fill(Int32(0), 3nargs))
@@ -76,21 +80,26 @@ module MiniCassette
function overdub_generator(world::UInt, source, self, ctx, f, args)
@nospecialize
+ argnames = Core.svec(:overdub, :ctx, :f, :args)
+ spnames = Core.svec()
+
if !Base.issingletontype(f)
# (c, f, args..) -> f(args...)
- ex = :(return f(args...))
- return Core.GeneratedFunctionStub(identity, Core.svec(:overdub, :ctx, :f, :args), Core.svec())(world, source, ex)
+ return generate_lambda_ex(world, source, argnames, spnames, :(return f(args...)))
end
tt = Tuple{f, args...}
match = Base._which(tt; world)
mi = Base.specialize_method(match)
# Unsupported in this mini-cassette
- @assert !mi.def.isva
+ !mi.def.isva ||
+ return generate_lambda_ex(world, source, argnames, spnames, :(error("Unsupported vararg method")))
src = retrieve_code_info(mi, world)
- @assert isa(src, CodeInfo)
+ isa(src, CodeInfo) ||
+ return generate_lambda_ex(world, source, argnames, spnames, :(error("Unexpected code transformation")))
src = copy(src)
- @assert src.edges === Core.svec()
+ src.edges === Core.svec() ||
+ return generate_lambda_ex(world, source, argnames, spnames, :(error("Unexpected code transformation")))
src.edges = Any[mi]
transform!(mi, src, length(args), match.sparams)
# TODO: this is mandatory: code_info.min_world = max(code_info.min_world, min_world[])
@@ -98,9 +107,21 @@ module MiniCassette
# Match the generator, since that's what our transform! does
src.nargs = 4
src.isva = true
+ errors = CC.validate_code(mi, src)
+ if !isempty(errors)
+ foreach(Core.println, errors)
+ return generate_lambda_ex(world, source, argnames, spnames, :(error("Found errors in generated code")))
+ end
return src
end
+ function generate_lambda_ex(world::UInt, source::Method,
+ argnames::SimpleVector, spnames::SimpleVector,
+ body::Expr)
+ stub = Core.GeneratedFunctionStub(identity, argnames, spnames)
+ return stub(world, source, body)
+ end
+
@inline overdub(::Ctx, f::Union{Core.Builtin, Core.IntrinsicFunction}, args...) = f(args...)
@eval function overdub(ctx::Ctx, f, args...)
@@ -124,3 +145,8 @@ f() = 2
foo(i) = i+bar(Val(1))
@test @inferred(overdub(Ctx(), foo, 1)) == 43
+
+morethan4args(a, b, c, d, e) = (((a + b) + c) + d) + e
+@test overdub(Ctx(), morethan4args, 1, 2, 3, 4, 5) == 15
+
+end # module contextual
diff --git a/Compiler/test/effects.jl b/Compiler/test/effects.jl
index b8a841b6b74b7..720825aa145f8 100644
--- a/Compiler/test/effects.jl
+++ b/Compiler/test/effects.jl
@@ -378,32 +378,38 @@ let effects = Base.infer_effects(; optimize=false) do
end
# we should taint `nothrow` if the binding doesn't exist and isn't fixed yet,
-# as the cached effects can be easily wrong otherwise
-# since the inference currently doesn't track "world-age" of global variables
-@eval global_assignment_undefinedyet() = $(GlobalRef(@__MODULE__, :UNDEFINEDYET)) = 42
setglobal!_nothrow_undefinedyet() = setglobal!(@__MODULE__, :UNDEFINEDYET, 42)
-let effects = Base.infer_effects() do
- global_assignment_undefinedyet()
- end
+let effects = Base.infer_effects(setglobal!_nothrow_undefinedyet)
@test !Compiler.is_nothrow(effects)
end
-let effects = Base.infer_effects() do
- setglobal!_nothrow_undefinedyet()
- end
+@test_throws ErrorException setglobal!_nothrow_undefinedyet()
+# This declares the binding as ::Any
+@eval global_assignment_undefinedyet() = $(GlobalRef(@__MODULE__, :UNDEFINEDYET)) = 42
+let effects = Base.infer_effects(global_assignment_undefinedyet)
+ @test Compiler.is_nothrow(effects)
+end
+# Again with type mismatch
+global UNDEFINEDYET2::String = "0"
+setglobal!_nothrow_undefinedyet2() = setglobal!(@__MODULE__, :UNDEFINEDYET2, 42)
+@eval global_assignment_undefinedyet2() = $(GlobalRef(@__MODULE__, :UNDEFINEDYET2)) = 42
+let effects = Base.infer_effects(global_assignment_undefinedyet2)
@test !Compiler.is_nothrow(effects)
end
-global UNDEFINEDYET::String = "0"
-let effects = Base.infer_effects() do
- global_assignment_undefinedyet()
- end
+let effects = Base.infer_effects(setglobal!_nothrow_undefinedyet2)
@test !Compiler.is_nothrow(effects)
end
-let effects = Base.infer_effects() do
- setglobal!_nothrow_undefinedyet()
- end
+@test_throws TypeError setglobal!_nothrow_undefinedyet2()
+
+module ExportMutableGlobal
+ global mutable_global_for_setglobal_test::Int = 0
+ export mutable_global_for_setglobal_test
+end
+using .ExportMutableGlobal: mutable_global_for_setglobal_test
+f_assign_imported() = global mutable_global_for_setglobal_test = 42
+let effects = Base.infer_effects(f_assign_imported)
@test !Compiler.is_nothrow(effects)
end
-@test_throws Union{ErrorException,TypeError} setglobal!_nothrow_undefinedyet() # TODO: what kind of error should this be?
+@test_throws ErrorException f_assign_imported()
# Nothrow for setfield!
mutable struct SetfieldNothrow
@@ -1395,3 +1401,73 @@ end == Compiler.EFFECTS_UNKNOWN
@test !Compiler.intrinsic_nothrow(Core.Intrinsics.fpext, Any[Type{Float32}, Float64])
@test !Compiler.intrinsic_nothrow(Core.Intrinsics.fpext, Any[Type{Int32}, Float16])
@test !Compiler.intrinsic_nothrow(Core.Intrinsics.fpext, Any[Type{Float32}, Int16])
+
+# Float intrinsics require float arguments
+@test Base.infer_effects((Int16,)) do x
+ return Core.Intrinsics.abs_float(x)
+end |> !Compiler.is_nothrow
+@test Base.infer_effects((Int32, Int32)) do x, y
+ return Core.Intrinsics.add_float(x, y)
+end |> !Compiler.is_nothrow
+@test Base.infer_effects((Int32, Int32)) do x, y
+ return Core.Intrinsics.add_float(x, y)
+end |> !Compiler.is_nothrow
+@test Base.infer_effects((Int64, Int64, Int64)) do x, y, z
+ return Core.Intrinsics.fma_float(x, y, z)
+end |> !Compiler.is_nothrow
+@test Base.infer_effects((Int64,)) do x
+ return Core.Intrinsics.fptoui(UInt32, x)
+end |> !Compiler.is_nothrow
+@test Base.infer_effects((Int64,)) do x
+ return Core.Intrinsics.fptosi(Int32, x)
+end |> !Compiler.is_nothrow
+@test Base.infer_effects((Int64,)) do x
+ return Core.Intrinsics.sitofp(Int64, x)
+end |> !Compiler.is_nothrow
+@test Base.infer_effects((UInt64,)) do x
+ return Core.Intrinsics.uitofp(Int64, x)
+end |> !Compiler.is_nothrow
+
+# effects modeling for pointer-related intrinsics
+let effects = Base.infer_effects(Core.Intrinsics.pointerref, Tuple{Vararg{Any}})
+ @test !Compiler.is_consistent(effects)
+ @test Compiler.is_effect_free(effects)
+ @test !Compiler.is_inaccessiblememonly(effects)
+end
+let effects = Base.infer_effects(Core.Intrinsics.pointerset, Tuple{Vararg{Any}})
+ @test Compiler.is_consistent(effects)
+ @test !Compiler.is_effect_free(effects)
+end
+# effects modeling for atomic intrinsics
+# these functions especially need to be marked !effect_free since they imply synchronization
+for atomicfunc = Any[
+ Core.Intrinsics.atomic_pointerref,
+ Core.Intrinsics.atomic_pointerset,
+ Core.Intrinsics.atomic_pointerswap,
+ Core.Intrinsics.atomic_pointerreplace,
+ Core.Intrinsics.atomic_fence]
+ @test !Compiler.is_effect_free(Base.infer_effects(atomicfunc, Tuple{Vararg{Any}}))
+end
+
+# effects modeling for intrinsics that can do arbitrary things
+let effects = Base.infer_effects(Core.Intrinsics.llvmcall, Tuple{Vararg{Any}})
+ @test effects == Compiler.Effects()
+end
+let effects = Base.infer_effects(Core.Intrinsics.atomic_pointermodify, Tuple{Vararg{Any}})
+ @test effects == Compiler.Effects()
+end
+
+# JuliaLang/julia#57780
+let effects = Base.infer_effects(Base._unsetindex!, (MemoryRef{String},))
+ @test !Compiler.is_effect_free(effects)
+end
+
+# Core._svec_ref effects modeling (required for external abstract interpreter that doesn't run optimization)
+let effects = Base.infer_effects((Core.SimpleVector,Int); optimize=false) do svec, i
+ Core._svec_ref(svec, i)
+ end
+ @test !Compiler.is_consistent(effects)
+ @test Compiler.is_effect_free(effects)
+ @test !Compiler.is_nothrow(effects)
+ @test Compiler.is_terminates(effects)
+end
diff --git a/Compiler/test/inference.jl b/Compiler/test/inference.jl
index d4ea990e7d148..e8a10d537c1ad 100644
--- a/Compiler/test/inference.jl
+++ b/Compiler/test/inference.jl
@@ -1207,6 +1207,7 @@ let isdefined_tfunc(@nospecialize xs...) =
@test isdefined_tfunc(Union{UnionIsdefinedA,UnionIsdefinedB}, Const(:x)) === Const(true)
@test isdefined_tfunc(Union{UnionIsdefinedA,UnionIsdefinedB}, Const(:y)) === Const(false)
@test isdefined_tfunc(Union{UnionIsdefinedA,Nothing}, Const(:x)) === Bool
+ @test isdefined_tfunc(Nothing, Any) === Const(false)
end
# https://github.com/aviatesk/JET.jl/issues/379
@@ -6160,14 +6161,28 @@ end === Int
swapglobal!(@__MODULE__, :swapglobal!_xxx, x)
end === Union{}
-global swapglobal!_must_throw
-@newinterp SwapGlobalInterp
-Compiler.InferenceParams(::SwapGlobalInterp) = Compiler.InferenceParams(; assume_bindings_static=true)
+@newinterp AssumeBindingsStaticInterp
+Compiler.InferenceParams(::AssumeBindingsStaticInterp) = Compiler.InferenceParams(; assume_bindings_static=true)
+
+eval(Expr(:const, :swapglobal!_must_throw))
function func_swapglobal!_must_throw(x)
swapglobal!(@__MODULE__, :swapglobal!_must_throw, x)
end
-@test Base.infer_return_type(func_swapglobal!_must_throw, (Int,); interp=SwapGlobalInterp()) === Union{}
-@test !Compiler.is_effect_free(Base.infer_effects(func_swapglobal!_must_throw, (Int,); interp=SwapGlobalInterp()) )
+@test Base.infer_return_type(func_swapglobal!_must_throw, (Int,); interp=AssumeBindingsStaticInterp()) === Union{}
+@test !Compiler.is_effect_free(Base.infer_effects(func_swapglobal!_must_throw, (Int,); interp=AssumeBindingsStaticInterp()) )
+
+global global_decl_defined
+global_decl_defined = 42
+@test Base.infer_effects(; interp=AssumeBindingsStaticInterp()) do
+ global global_decl_defined
+ return global_decl_defined
+end |> Compiler.is_nothrow
+global global_decl_defined2::Int
+global_decl_defined2 = 42
+@test Base.infer_effects(; interp=AssumeBindingsStaticInterp()) do
+ global global_decl_defined2
+ return global_decl_defined2
+end |> Compiler.is_nothrow
@eval get_exception() = $(Expr(:the_exception))
@test Base.infer_return_type() do
@@ -6188,3 +6203,69 @@ end == Union{Float64,DomainError}
@test Compiler.argtypes_to_type(Any[ Int, UnitRange{Int}, Vararg{Pair{Any, Union{}}}, Float64 ]) === Tuple{Int, UnitRange{Int}, Float64}
@test Compiler.argtypes_to_type(Any[ Int, UnitRange{Int}, Vararg{Pair{Any, Union{}}}, Float64, Memory{2} ]) === Union{}
@test Base.return_types(Tuple{Tuple{Int, Vararg{Pair{Any, Union{}}}}},) do x; Returns(true)(x...); end |> only === Bool
+
+# issue #57292
+f57292(xs::Union{Tuple{String}, Int}...) = getfield(xs...)
+g57292(xs::String...) = getfield(("abc",), 1, :not_atomic, xs...)
+@test Base.infer_return_type(f57292) == String
+@test Base.infer_return_type(g57292) == String
+
+global invalid_setglobal!_exct_modeling::Int
+@test Base.infer_exception_type((Float64,)) do x
+ setglobal!(@__MODULE__, :invalid_setglobal!_exct_modeling, x)
+end == ErrorException
+
+# Issue #58257 - Hang in inference during BindingPartition resolution
+module A58257
+ module B58257
+ using ..A58257
+ # World age here is N
+ end
+ using .B58257
+ # World age here is N+1
+ @eval f() = $(GlobalRef(B58257, :get!))
+end
+
+## The sequence of events is critical here.
+A58257.get! # Creates binding partition in A, N+1:∞
+A58257.B58257.get! # Creates binding partition in A.B, N+1:∞
+Base.invoke_in_world(UInt(38678), getglobal, A58257, :get!) # Expands binding partition in A through f(IOContext{IO}(io), args...), f, args...)
+end
+
+let infos = Any[]
+ errors, parents = get_verify_typeinf_trim(infos)
+ @test isempty(errors)
+ @test isempty(parents)
+end
+
+finalizer(@nospecialize(f), @nospecialize(o)) = Core.finalizer(f, o)
+
+let infos = typeinf_ext_toplevel(Any[Core.svec(Nothing, Tuple{typeof(finalizer), typeof(identity), Any})], [Base.get_world_counter()], TRIM_UNSAFE)
+ errors, parents = get_verify_typeinf_trim(infos)
+ @test !isempty(errors) # unresolvable finalizer
+
+ # the only error should be a CallMissing error for the Core.finalizer builtin
+ (warn, desc) = only(errors)
+ @test !warn
+ @test desc isa CallMissing
+ @test occursin("finalizer", desc.desc)
+ repr = sprint(verify_print_error, desc, parents)
+ @test occursin(
+ r"""^unresolved finalizer registered from statement \(Core.finalizer\)\(f::Any, o::Any\)::Nothing
+ Stacktrace:
+ \[1\] finalizer\(f::Any, o::Any\)""", repr)
+end
+
+make_cfunction() = @cfunction(+, Float64, (Int64,Int64))
+
+# use TRIM_UNSAFE to bypass verifier inside typeinf_ext_toplevel
+let infos = typeinf_ext_toplevel(Any[Core.svec(Ptr{Cvoid}, Tuple{typeof(make_cfunction)})], [Base.get_world_counter()], TRIM_UNSAFE)
+ errors, parents = get_verify_typeinf_trim(infos)
+ @test_broken isempty(errors) # missing cfunction
+
+ desc = only(errors)
+ @test desc.first
+ desc = desc.second
+ @test desc isa CallMissing
+ @test occursin("cfunction", desc.desc)
+ repr = sprint(verify_print_error, desc, parents)
+ @test occursin(
+ r"""^unresolved cfunction from statement \$\(Expr\(:cfunction, Ptr{Nothing}, :\(\$\(QuoteNode\(\+\)\)\), Float64, :\(svec\(Int64, Int64\)::Core.SimpleVector\), :\(:ccall\)\)\)::Ptr{Nothing}
+ Stacktrace:
+ \[1\] make_cfunction\(\)""", repr)
+
+ resize!(infos, 1)
+ @test infos[1] isa Core.SimpleVector && infos[1][1] isa Type && infos[1][2] isa Type
+ errors, parents = get_verify_typeinf_trim(infos)
+ desc = only(errors)
+ @test !desc.first
+ desc = desc.second
+ @test desc isa CCallableMissing
+ @test desc.rt == Ptr{Cvoid}
+ @test desc.sig == Tuple{typeof(make_cfunction)}
+ @test occursin("unresolved ccallable", desc.desc)
+ repr = sprint(verify_print_error, desc, parents)
+ @test repr == "unresolved ccallable for Tuple{$(typeof(make_cfunction))} => Ptr{Nothing}\n\n"
+end
+
+let infos = typeinf_ext_toplevel(Any[Core.svec(Base.SecretBuffer, Tuple{Type{Base.SecretBuffer}})], [Base.get_world_counter()], TRIM_UNSAFE)
+ @test length(infos) > 4
+ errors, parents = get_verify_typeinf_trim(infos)
+ @test isempty(errors)
+
+ resize!(infos, 1)
+ @test infos[1] isa Core.SimpleVector && infos[1][1] isa Type && infos[1][2] isa Type
+ errors, parents = get_verify_typeinf_trim(infos)
+ desc = only(errors)
+ @test !desc.first
+ desc = desc.second
+ @test desc isa CCallableMissing
+ @test desc.rt == Base.SecretBuffer
+ @test desc.sig == Tuple{Type{Base.SecretBuffer}}
+ @test occursin("unresolved ccallable", desc.desc)
+ repr = sprint(verify_print_error, desc, parents)
+ @test repr == "unresolved ccallable for Tuple{Type{Base.SecretBuffer}} => Base.SecretBuffer\n\n"
+end
+
+let infos = typeinf_ext_toplevel(Any[Core.svec(Float64, Tuple{typeof(+), Int32, Int64})], [Base.get_world_counter()], TRIM_UNSAFE)
+ errors, parents = get_verify_typeinf_trim(infos)
+ desc = only(errors)
+ @test !desc.first
+ desc = desc.second
+ @test desc isa CCallableMissing
+ @test desc.rt == Int64
+ @test desc.sig == Tuple{typeof(+), Int32, Int64}
+ @test occursin("ccallable declared return type", desc.desc)
+ repr = sprint(verify_print_error, desc, parents)
+ @test repr == "ccallable declared return type does not match inference for Tuple{typeof(+), Int32, Int64} => Int64\n\n"
+end
+
+let infos = typeinf_ext_toplevel(Any[Core.svec(Int64, Tuple{typeof(ifelse), Bool, Int64, UInt64})], [Base.get_world_counter()], TRIM_UNSAFE)
+ errors, parents = get_verify_typeinf_trim(infos)
+ desc = only(errors)
+ @test desc.first
+ desc = desc.second
+ @test desc isa CCallableMissing
+ @test occursin("ccallable declared return type", desc.desc)
+ repr = sprint(verify_print_error, desc, parents)
+ @test repr == "ccallable declared return type does not match inference for Tuple{typeof(ifelse), Bool, Int64, UInt64} => Union{Int64, UInt64}\n\n"
+end
+
+let infos = typeinf_ext_toplevel(Any[Core.svec(Union{Int64,UInt64}, Tuple{typeof(ifelse), Bool, Int64, UInt64})], [Base.get_world_counter()], TRIM_SAFE)
+ errors, parents = get_verify_typeinf_trim(infos)
+ @test isempty(errors)
+ infos = typeinf_ext_toplevel(Any[Core.svec(Real, Tuple{typeof(ifelse), Bool, Int64, UInt64})], [Base.get_world_counter()], TRIM_SAFE)
+ errors, parents = get_verify_typeinf_trim(infos)
+ @test isempty(errors)
+end
diff --git a/HISTORY.md b/HISTORY.md
index c3ca212453d07..85fafc548f6a4 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -11,20 +11,6 @@ New language features
* The new macro `Base.Cartesian.@ncallkw` is analogous to `Base.Cartesian.@ncall`,
but allows to add keyword arguments to the function call ([#51501]).
* Support for Unicode 15.1 ([#51799]).
-* Three new types around the idea of text with "annotations" (`Pair{Symbol, Any}`
- entries, e.g. `:lang => "en"` or `:face => :magenta`). These annotations
- are preserved across operations (e.g. string concatenation with `*`) when
- possible.
- * `AnnotatedString` is a new `AbstractString` type. It wraps an underlying
- string and allows for annotations to be attached to regions of the string.
- This type is used extensively in the new `StyledStrings` standard library to
- hold styling information.
- * `AnnotatedChar` is a new `AbstractChar` type. It wraps another char and
- holds a list of annotations that apply to it.
- * `AnnotatedIOBuffer` is a new `IO` type that mimics an `IOBuffer`, but has
- specialised `read`/`write` methods for annotated content. This can be
- thought of both as a "string builder" of sorts and also as glue between
- annotated and unannotated content.
* `Manifest.toml` files can now be renamed in the format `Manifest-v{major}.{minor}.toml`
to be preferentially picked up by the given julia version. i.e. in the same folder,
a `Manifest-v1.11.toml` would be used by v1.11 and `Manifest.toml` by every other julia
@@ -126,7 +112,20 @@ Standard library changes
#### StyledStrings
-* A new standard library for handling styling in a more comprehensive and structured way ([#49586]).
+* A new experimental standard library for handling styling in a more comprehensive and structured way ([#49586]).
+* Three new types around the idea of text with "annotations" (`Pair{Symbol, Any}`
+ entries, e.g. `:lang => "en"` or `:face => :magenta`). These annotations
+ are preserved across operations (e.g. string concatenation with `*`) when
+ possible.
+ * `AnnotatedString` is a new `AbstractString` type. It wraps an underlying
+ string and allows for annotations to be attached to regions of the string.
+ This type is used extensively to hold styling information.
+ * `AnnotatedChar` is a new `AbstractChar` type. It wraps another char and
+ holds a list of annotations that apply to it.
+ * `AnnotatedIOBuffer` is a new `IO` type that mimics an `IOBuffer`, but has
+ specialised `read`/`write` methods for annotated content. This can be
+ thought of both as a "string builder" of sorts and also as glue between
+ annotated and unannotated content.
* The new `Faces` struct serves as a container for text styling information
(think typeface, as well as color and decoration), and comes with a framework
to provide a convenient, extensible (via `addface!`), and customisable (with a
diff --git a/Make.inc b/Make.inc
index 16e238c6f0683..466e97cc43c4f 100644
--- a/Make.inc
+++ b/Make.inc
@@ -111,6 +111,11 @@ endef
COMMA:=,
SPACE:=$(eval) $(eval)
+# define helper macros for safe interpolation into various parsers
+shell_escape='$(subst ','\'',$1)'
+c_escape="$(subst ",\",$(subst \,\\,$1))"
+julia_escape=$(call c_escape,$1)
+
# force a sane / stable configuration
export LC_ALL=C
export LANG=C
@@ -644,12 +649,12 @@ CXX += -Qunused-arguments
export CCACHE_CPP2 := yes
endif
else #USECCACHE
-CC_BASE := $(shell echo $(CC) | cut -d' ' -f1)
-CC_ARG := $(shell echo $(CC) | cut -s -d' ' -f2-)
-CXX_BASE := $(shell echo $(CXX) | cut -d' ' -f1)
-CXX_ARG := $(shell echo $(CXX) | cut -s -d' ' -f2-)
-FC_BASE := $(shell echo $(FC) 2>/dev/null | cut -d' ' -f1)
-FC_ARG := $(shell echo $(FC) 2>/dev/null | cut -s -d' ' -f2-)
+CC_BASE := $(shell printf "%s\n" $(call shell_escape,$(CC)) | cut -d' ' -f1)
+CC_ARG := $(shell printf "%s\n" $(call shell_escape,$(CC)) | cut -s -d' ' -f2-)
+CXX_BASE := $(shell printf "%s\n" $(call shell_escape,$(CXX)) | cut -d' ' -f1)
+CXX_ARG := $(shell printf "%s\n" $(call shell_escape,$(CXX)) | cut -s -d' ' -f2-)
+FC_BASE := $(shell printf "%s\n" $(call shell_escape,$(FC)) 2>/dev/null | cut -d' ' -f1)
+FC_ARG := $(shell printf "%s\n" $(call shell_escape,$(FC)) 2>/dev/null | cut -s -d' ' -f2-)
endif
JFFLAGS := -O2 $(fPIC)
@@ -776,7 +781,7 @@ LDFLAGS += -L$(build_libdir) -Wl,-rpath,$(build_libdir)
endif # gfortran
endif # FreeBSD
-ifneq ($(CC_BASE)$(CXX_BASE),$(shell echo $(CC) | cut -d' ' -f1)$(shell echo $(CXX) | cut -d' ' -f1))
+ifneq ($(CC_BASE)$(CXX_BASE),$(shell printf "%s\n" $(call shell_escape,$(CC)) | cut -d' ' -f1)$(shell printf "%s\n" $(call shell_escape,$(CXX)) | cut -d' ' -f1))
$(error Forgot override directive on CC or CXX in Make.user? Cowardly refusing to build)
endif
@@ -1663,6 +1668,12 @@ $(subst /,\\,$(subst $(shell $(2) pwd),$(shell $(2) cmd //C cd),$(abspath $(1)))
endef
endif
+ifeq ($(OS), WINNT)
+normalize_path = $(subst /,\,$1)
+else
+normalize_path = $1
+endif
+
define symlink_target # (from, to-dir, to-name)
CLEAN_TARGETS += clean-$$(abspath $(2)/$(3))
clean-$$(abspath $(2)/$(3)):
@@ -1729,20 +1740,20 @@ JULIA_SYSIMG_release := $(build_private_libdir)/sys.$(SHLIB_EXT)
JULIA_SYSIMG := $(JULIA_SYSIMG_$(JULIA_BUILD_MODE))
define dep_lib_path
-$(shell $(PYTHON) $(call python_cygpath,$(JULIAHOME)/contrib/relative_path.py) $(1) $(2))
+$(call normalize_path,$(shell $(PYTHON) $(call python_cygpath,$(JULIAHOME)/contrib/relative_path.py) $(1) $(2)))
endef
-LIBJULIAINTERNAL_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_shlibdir)/libjulia-internal.$(JL_MAJOR_SHLIB_EXT))
-LIBJULIAINTERNAL_INSTALL_DEPLIB := $(call dep_lib_path,$(libdir),$(private_shlibdir)/libjulia-internal.$(JL_MAJOR_SHLIB_EXT))
+LIBJULIAINTERNAL_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_shlibdir)/libjulia-internal.$(JL_MAJOR_SHLIB_EXT))
+LIBJULIAINTERNAL_INSTALL_DEPLIB := $(call dep_lib_path,$(shlibdir),$(private_shlibdir)/libjulia-internal.$(JL_MAJOR_SHLIB_EXT))
-LIBJULIAINTERNAL_DEBUG_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_shlibdir)/libjulia-internal-debug.$(JL_MAJOR_SHLIB_EXT))
-LIBJULIAINTERNAL_DEBUG_INSTALL_DEPLIB := $(call dep_lib_path,$(libdir),$(private_shlibdir)/libjulia-internal-debug.$(JL_MAJOR_SHLIB_EXT))
+LIBJULIAINTERNAL_DEBUG_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_shlibdir)/libjulia-internal-debug.$(JL_MAJOR_SHLIB_EXT))
+LIBJULIAINTERNAL_DEBUG_INSTALL_DEPLIB := $(call dep_lib_path,$(shlibdir),$(private_shlibdir)/libjulia-internal-debug.$(JL_MAJOR_SHLIB_EXT))
-LIBJULIACODEGEN_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_shlibdir)/libjulia-codegen.$(JL_MAJOR_SHLIB_EXT))
-LIBJULIACODEGEN_INSTALL_DEPLIB := $(call dep_lib_path,$(libdir),$(private_shlibdir)/libjulia-codegen.$(JL_MAJOR_SHLIB_EXT))
+LIBJULIACODEGEN_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_shlibdir)/libjulia-codegen.$(JL_MAJOR_SHLIB_EXT))
+LIBJULIACODEGEN_INSTALL_DEPLIB := $(call dep_lib_path,$(shlibdir),$(private_shlibdir)/libjulia-codegen.$(JL_MAJOR_SHLIB_EXT))
-LIBJULIACODEGEN_DEBUG_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_shlibdir)/libjulia-codegen-debug.$(JL_MAJOR_SHLIB_EXT))
-LIBJULIACODEGEN_DEBUG_INSTALL_DEPLIB := $(call dep_lib_path,$(libdir),$(private_shlibdir)/libjulia-codegen-debug.$(JL_MAJOR_SHLIB_EXT))
+LIBJULIACODEGEN_DEBUG_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_shlibdir)/libjulia-codegen-debug.$(JL_MAJOR_SHLIB_EXT))
+LIBJULIACODEGEN_DEBUG_INSTALL_DEPLIB := $(call dep_lib_path,$(shlibdir),$(private_shlibdir)/libjulia-codegen-debug.$(JL_MAJOR_SHLIB_EXT))
ifeq ($(OS),WINNT)
ifeq ($(BINARY),32)
@@ -1770,34 +1781,34 @@ endif
# USE_SYSTEM_CSL causes it to get symlinked into build_private_shlibdir
ifeq ($(USE_SYSTEM_CSL),1)
-LIBGCC_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_private_shlibdir)/$(LIBGCC_NAME))
+LIBGCC_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_private_shlibdir)/$(LIBGCC_NAME))
else
-LIBGCC_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_shlibdir)/$(LIBGCC_NAME))
+LIBGCC_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_shlibdir)/$(LIBGCC_NAME))
endif
-LIBGCC_INSTALL_DEPLIB := $(call dep_lib_path,$(libdir),$(private_shlibdir)/$(LIBGCC_NAME))
+LIBGCC_INSTALL_DEPLIB := $(call dep_lib_path,$(shlibdir),$(private_shlibdir)/$(LIBGCC_NAME))
# We only bother to define this on Linux, as that's the only platform that does libstdc++ probing
# On all other platforms, the LIBSTDCXX_*_DEPLIB variables will be empty.
ifeq ($(OS),Linux)
LIBSTDCXX_NAME := libstdc++.so.6
ifeq ($(USE_SYSTEM_CSL),1)
-LIBSTDCXX_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_private_shlibdir)/$(LIBSTDCXX_NAME))
+LIBSTDCXX_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_private_shlibdir)/$(LIBSTDCXX_NAME))
else
-LIBSTDCXX_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_shlibdir)/$(LIBSTDCXX_NAME))
+LIBSTDCXX_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_shlibdir)/$(LIBSTDCXX_NAME))
endif
-LIBSTDCXX_INSTALL_DEPLIB := $(call dep_lib_path,$(libdir),$(private_shlibdir)/$(LIBSTDCXX_NAME))
+LIBSTDCXX_INSTALL_DEPLIB := $(call dep_lib_path,$(shlibdir),$(private_shlibdir)/$(LIBSTDCXX_NAME))
endif
# USE_SYSTEM_LIBM and USE_SYSTEM_OPENLIBM causes it to get symlinked into build_private_shlibdir
ifeq ($(USE_SYSTEM_LIBM),1)
-LIBM_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_private_shlibdir)/$(LIBMNAME).$(SHLIB_EXT))
+LIBM_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_private_shlibdir)/$(LIBMNAME).$(SHLIB_EXT))
else ifeq ($(USE_SYSTEM_OPENLIBM),1)
-LIBM_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_private_shlibdir)/$(LIBMNAME).$(SHLIB_EXT))
+LIBM_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_private_shlibdir)/$(LIBMNAME).$(SHLIB_EXT))
else
-LIBM_BUILD_DEPLIB := $(call dep_lib_path,$(build_libdir),$(build_shlibdir)/$(LIBMNAME).$(SHLIB_EXT))
+LIBM_BUILD_DEPLIB := $(call dep_lib_path,$(build_shlibdir),$(build_shlibdir)/$(LIBMNAME).$(SHLIB_EXT))
endif
-LIBM_INSTALL_DEPLIB := $(call dep_lib_path,$(libdir),$(private_shlibdir)/$(LIBMNAME).$(SHLIB_EXT))
+LIBM_INSTALL_DEPLIB := $(call dep_lib_path,$(shlibdir),$(private_shlibdir)/$(LIBMNAME).$(SHLIB_EXT))
# We list:
# * libgcc_s, because FreeBSD needs to load ours, not the system one.
@@ -1861,7 +1872,7 @@ ifeq ($(VERBOSE), 0)
QUIET_MAKE = -s
-GOAL=$(subst ','\'',$(subst $(abspath $(JULIAHOME))/,,$(abspath $@)))
+GOAL=$(call shell_escape,$(subst $(abspath $(JULIAHOME))/,,$(abspath $@)))
PRINT_CC = printf ' %b %b\n' $(CCCOLOR)CC$(ENDCOLOR) $(SRCCOLOR)$(GOAL)$(ENDCOLOR); $(1)
PRINT_ANALYZE = printf ' %b %b\n' $(CCCOLOR)ANALYZE$(ENDCOLOR) $(SRCCOLOR)$(GOAL)$(ENDCOLOR); $(1)
@@ -1873,13 +1884,13 @@ PRINT_DTRACE = printf ' %b %b\n' $(DTRACECOLOR)DTRACE$(ENDCOLOR) $(BINCOLOR)$
else
QUIET_MAKE =
-PRINT_CC = echo '$(subst ','\'',$(1))'; $(1)
-PRINT_ANALYZE = echo '$(subst ','\'',$(1))'; $(1)
-PRINT_LINK = echo '$(subst ','\'',$(1))'; $(1)
-PRINT_PERL = echo '$(subst ','\'',$(1))'; $(1)
-PRINT_FLISP = echo '$(subst ','\'',$(1))'; $(1)
-PRINT_JULIA = echo '$(subst ','\'',$(1))'; $(1)
-PRINT_DTRACE = echo '$(subst ','\'',$(1))'; $(1)
+PRINT_CC = printf "%s\n" $(call shell_escape,$(1)); $(1)
+PRINT_ANALYZE = printf "%s\n" $(call shell_escape,$(1)); $(1)
+PRINT_LINK = printf "%s\n" $(call shell_escape,$(1)); $(1)
+PRINT_PERL = printf "%s\n" $(call shell_escape,$(1)); $(1)
+PRINT_FLISP = printf "%s\n" $(call shell_escape,$(1)); $(1)
+PRINT_JULIA = printf "%s\n" $(call shell_escape,$(1)); $(1)
+PRINT_DTRACE = printf "%s\n" $(call shell_escape,$(1)); $(1)
endif # VERBOSE
@@ -1887,4 +1898,4 @@ endif # VERBOSE
# call print-VARIABLE to see the runtime value of any variable
# (hardened against any special characters appearing in the output)
print-%:
- @echo '$*=$(subst ','\'',$(subst $(newline),\n,$($*)))'
+ @printf "%s\n" $(call shell_escape,$*)=$(call shell_escape,$(subst $(newline),\n,$($*)))
diff --git a/Makefile b/Makefile
index b193b3849c6aa..3a529177b62de 100644
--- a/Makefile
+++ b/Makefile
@@ -4,18 +4,18 @@ include $(JULIAHOME)/Make.inc
include $(JULIAHOME)/deps/llvm-ver.make
# Make sure the user didn't try to build in a path that will confuse the shell or make
-METACHARACTERS := [][?*{}() $$%:;&|!\#,\\`\":]\|/\./\|/\.\./
+METACHARACTERS := [][?*{}() $$%:;&|!\#,\\`\": ]\|/\./\|/\.\./
ifneq (,$(findstring ',$(value BUILDROOT)))
$(error cowardly refusing to build into directory with a single-quote in the path)
endif
ifneq (,$(findstring ',$(value JULIAHOME)))
$(error cowardly refusing to build from source directory with a single-quote in the path)
endif
-ifneq (,$(shell echo '$(value BUILDROOT)/' | grep '$(METACHARACTERS)'))
+ifneq (,$(shell printf "%s\n" $(call shell_escape,$(value BUILDROOT)/) | grep '$(METACHARACTERS)'))
$(error cowardly refusing to build into directory with a shell-metacharacter in the path\
(got: $(value BUILDROOT)))
endif
-ifneq (,$(shell echo '$(value JULIAHOME)/' | grep '$(METACHARACTERS)'))
+ifneq (,$(shell printf "%s\n" $(call shell_escape,$(value JULIAHOME)/) | grep '$(METACHARACTERS)'))
$(error cowardly refusing to build from source directory with a shell-metacharacter in the path\
(got: $(value JULIAHOME)))
endif
@@ -33,9 +33,9 @@ BUILDDIRMAKE := $(addsuffix /Makefile,$(BUILDDIRS)) $(BUILDROOT)/sysimage.mk $(B
DIRS += $(BUILDDIRS)
$(BUILDDIRMAKE): | $(BUILDDIRS)
@# add Makefiles to the build directories for convenience (pointing back to the source location of each)
- @echo '# -- This file is automatically generated in julia/Makefile -- #' > $@
- @echo 'BUILDROOT=$(BUILDROOT)' >> $@
- @echo 'include $(JULIAHOME)$(patsubst $(BUILDROOT)%,%,$@)' >> $@
+ @printf "%s\n" '# -- This file is automatically generated in julia/Makefile -- #' > $@
+ @printf "%s\n" 'BUILDROOT=$(BUILDROOT)' >> $@
+ @printf "%s\n" 'include $(JULIAHOME)$(patsubst $(BUILDROOT)%,%,$@)' >> $@
julia-deps: | $(BUILDDIRMAKE)
configure-y: | $(BUILDDIRMAKE)
configure:
@@ -67,7 +67,7 @@ $(BUILDROOT)/doc/_build/html/en/index.html: $(shell find $(BUILDROOT)/base $(BUI
julia-symlink: julia-cli-$(JULIA_BUILD_MODE)
ifeq ($(OS),WINNT)
- echo '@"%~dp0/'"$$(echo '$(call rel_path,$(BUILDROOT),$(JULIA_EXECUTABLE))')"'" %*' | tr / '\\' > $(BUILDROOT)/julia.bat
+ printf '@"%%~dp0/%s" %%*\n' "$$(printf "%s\n" '$(call rel_path,$(BUILDROOT),$(JULIA_EXECUTABLE))')" | tr / '\\' > $(BUILDROOT)/julia.bat
chmod a+x $(BUILDROOT)/julia.bat
else
ifndef JULIA_VAGRANT_BUILD
@@ -177,6 +177,7 @@ release-candidate: release testall
@echo 14. Push to Juliaup (https://github.com/JuliaLang/juliaup/wiki/Adding-a-Julia-version)
@echo 15. Announce on mailing lists
@echo 16. Change master to release-0.X in base/version.jl and base/version_git.sh as in 4cb1e20
+ @echo 17. Move NEWS.md contents to HISTORY.md
@echo
$(build_man1dir)/julia.1: $(JULIAHOME)/doc/man/julia.1 | $(build_man1dir)
@@ -282,13 +283,31 @@ endif
endif
ifneq (${MMTK_PLAN},None)
+# Make sure we use the right version of $MMTK_PLAN, $MMTK_MOVING and $MMTK_BUILD
+# if we use the BinaryBuilder version of mmtk-julia
+ifeq ($(USE_BINARYBUILDER_MMTK_JULIA),1)
+ifeq (${MMTK_PLAN},Immix)
+LIB_PATH_PLAN = immix
+else ifeq (${MMTK_PLAN},StickyImmix)
+LIB_PATH_PLAN = sticky
+endif
+
+ifeq ($(MMTK_MOVING), 0)
+LIB_PATH_MOVING := non_moving
+else
+LIB_PATH_MOVING := moving
+endif
+
+JL_PRIVATE_LIBS-0 += $(LIB_PATH_PLAN)/$(LIB_PATH_MOVING)/$(MMTK_BUILD)/libmmtk_julia
+else
JL_PRIVATE_LIBS-0 += libmmtk_julia
endif
+endif
# Note that we disable MSYS2's path munging here, as otherwise
# it replaces our `:`-separated list as a `;`-separated one.
define stringreplace
- MSYS2_ARG_CONV_EXCL='*' $(build_depsbindir)/stringreplace $$(strings -t x - '$1' | grep "$2" | awk '{print $$1;}') "$3" 255 "$(call cygpath_w,$1)"
+ MSYS2_ARG_CONV_EXCL='*' $(build_depsbindir)/stringreplace $$(strings -t x - '$1' | grep '$2' | awk '{print $$1;}') '$3' 255 '$(call cygpath_w,$1)'
endef
diff --git a/NEWS.md b/NEWS.md
index 53643ee2c0954..6a2c3b8f28a21 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -4,83 +4,82 @@ Julia v1.12 Release Notes
New language features
---------------------
-- New option `--trim` creates smaller binaries by removing code that was not proven to be reachable from
- the entry points. Entry points can be marked using `Base.Experimental.entrypoint` ([#55047]).
-- A new keyword argument `usings::Bool` has been added to `names`. By using this, we can now
- find all the names available in module `A` by `names(A; all=true, imported=true, usings=true)`. ([#54609])
-- the `@atomic(...)` macro family supports now the reference assignment syntax, e.g.
- `@atomic :monotonic v[3] += 4` modifies `v[3]` atomically with monotonic ordering semantics. ([#54707])
+* New option `--trim` creates smaller binaries by removing code that was not proven to be reachable from
+ entry points. Entry points can be marked using `Base.Experimental.entrypoint` ([#55047]).
+* Redefinition of constants is now well defined and follows world age semantics. Additional redefinitions (e.g. of structs) are now allowed. See [the new manual chapter on world age](https://docs.julialang.org/en/v1.13-dev/manual/worldage/).
+* A new keyword argument `usings::Bool` has been added to `names`, returning all names visible
+ via `using` ([#54609]).
+* The `@atomic` macro family now supports reference assignment syntax, e.g. `@atomic :monotonic v[3] += 4`,
+ which modifies `v[3]` atomically with monotonic ordering semantics ([#54707]).
The supported syntax allows
- - atomic fetch (`x = @atomic v[3]`),
- - atomic set (`@atomic v[3] = 4`),
- - atomic modify (`@atomic v[3] += 2`),
- - atomic set once (`@atomiconce v[3] = 2`),
- - atomic swap (`x = @atomicswap v[3] = 2`), and
- - atomic replace (`x = @atomicreplace v[3] 2=>5`).
-- New option `--task-metrics=yes` to enable the collection of per-task timing information,
- which can also be enabled/disabled at runtime with `Base.Experimental.task_metrics(::Bool)`. ([#56320])
+ * atomic fetch (`x = @atomic v[3]`),
+ * atomic set (`@atomic v[3] = 4`),
+ * atomic modify (`@atomic v[3] += 2`),
+ * atomic set once (`@atomiconce v[3] = 2`),
+ * atomic swap (`x = @atomicswap v[3] = 2`), and
+ * atomic replace (`x = @atomicreplace v[3] 2=>5`).
+* New option `--task-metrics=yes` to enable the collection of per-task timing information,
+ which can also be enabled/disabled at runtime with `Base.Experimental.task_metrics(::Bool)` ([#56320]).
The available metrics are:
- - actual running time for the task (`Base.Experimental.task_running_time_ns`), and
- - wall-time for the task (`Base.Experimental.task_wall_time_ns`).
-- Support for Unicode 16 ([#56925]).
-- `Threads.@spawn` now takes a `:samepool` argument to specify the same threadpool as the caller.
- `Threads.@spawn :samepool foo()` which is shorthand for `Threads.@spawn Threads.threadpool() foo()` ([#57109])
+ * actual running time for the task (`Base.Experimental.task_running_time_ns`), and
+ * wall-time for the task (`Base.Experimental.task_wall_time_ns`).
+* Support for Unicode 16 ([#56925]).
+* `Threads.@spawn` now takes a `:samepool` argument to specify the same threadpool as the caller.
+ `Threads.@spawn :samepool foo()` which is shorthand for `Threads.@spawn Threads.threadpool() foo()` ([#57109]).
+* The `@ccall` macro can now take a `gc_safe` argument that, if set to `true`, allows the runtime to run garbage collection concurrently with the `ccall` ([#49933]).
Language changes
----------------
- - Julia now defaults to 1 "interactive" thread, in addition to the 1 "default" worker thread. i.e. `-t1,1`
+* Julia now defaults to 1 "interactive" thread, in addition to the 1 default "worker" thread. i.e. `-t1,1`.
This means in default configuration the main task and repl (when in interactive mode), which both run on
- thread 1, now run within the `interactive` threadpool. Also the libuv IO loop runs on thread 1,
- helping efficient utilization of the "default" worker threadpool, which is what `Threads.@threads` and a bare
- `Threads.@spawn` uses. Use `0` to disable the interactive thread i.e. `-t1,0` or `JULIA_NUM_THREADS=1,0`, or
- `-tauto,0` etc. The zero is explicitly required to disable it, `-t2` will set the equivalent of `-t2,1` ([#57087])
-
- - When methods are replaced with exactly equivalent ones, the old method is no
- longer deleted implicitly simultaneously, although the new method does take
- priority and become more specific than the old method. Thus if the new
- method is deleted later, the old method will resume operating. This can be
- useful to mocking frameworks (such as in SparseArrays, Pluto, and Mocking,
- among others), as they do not need to explicitly restore the old method.
- While inference and compilation still must be repeated with this, it also
- may pave the way for inference to be able to intelligently re-use the old
- results, once the new method is deleted. ([#53415])
-
- - Macro expansion will no longer eagerly recurse into `Expr(:toplevel)`
- expressions returned from macros. Instead, macro expansion of `:toplevel`
- expressions will be delayed until evaluation time. This allows a later
- expression within a given `:toplevel` expression to make use of macros
- defined earlier in the same `:toplevel` expression. ([#53515])
-
- - Trivial infinite loops (like `while true; end`) are no longer undefined
- behavior. Infinite loops that actually do things (e.g. have side effects
- or sleep) were never and are still not undefined behavior. ([#52999])
-
- - It is now an error to mark a binding as both `public` and `export`ed.
- ([#53664])
+ thread 1, now run within the `interactive` threadpool. The libuv IO loop also runs on thread 1,
+ helping efficient utilization of the worker threadpool used by `Threads.@spawn`. Pass `0` to disable the
+ interactive thread i.e. `-t1,0` or `JULIA_NUM_THREADS=1,0`, or `-tauto,0` etc. The zero is explicitly
+ required to disable it, `-t2` will set the equivalent of `-t2,1` ([#57087]).
+* When a method is replaced with an exactly equivalent one, the old method is not deleted. Instead, the
+ new method takes priority and becomes more specific than the old method. Thus if the new method is deleted
+ later, the old method will resume operating. This can be useful in mocking frameworks (as in SparseArrays,
+ Pluto, and Mocking, among others), as they do not need to explicitly restore the old method.
+ At this time, inference and compilation must be repeated in this situation, but we may eventually be
+ able to re-use the old results ([#53415]).
+* Macro expansion will no longer eagerly recurse into `Expr(:toplevel)` expressions returned from macros.
+ Instead, macro expansion of `:toplevel` expressions will be delayed until evaluation time. This allows a
+ later expression within a given `:toplevel` expression to make use of macros defined earlier in the same
+ `:toplevel` expression ([#53515]).
+* Trivial infinite loops (like `while true; end`) are no longer undefined behavior. Infinite loops that
+ do things (e.g. have side effects or sleep) were never and are still not undefined behavior ([#52999]).
+* It is now an error to mark a binding as both `public` and `export`ed ([#53664]).
+* Errors during `getfield` now raise a new `FieldError` exception type instead of the generic
+ `ErrorException` ([#54504]).
+* Macros in function-signature-position no longer require parentheses. E.g. `function @main(args) ... end` is now permitted, whereas `function (@main)(args) ... end` was required in prior Julia versions.
+* Calling `using` on a package name inside of that package of that name (especially relevant
+ for a submodule) now explicitly uses that package without examining the Manifest and
+ environment, which is identical to the behavior of `..Name`. This appears to better match
+ how users expect this to behave in the wild. ([#57727])
+
Compiler/Runtime improvements
-----------------------------
-- Generated LLVM IR now uses actual pointer types instead of passing pointers as integers.
+* Generated LLVM IR now uses pointer types instead of passing pointers as integers.
This affects `llvmcall`: Inline LLVM IR should be updated to use `i8*` or `ptr` instead of
`i32` or `i64`, and remove unneeded `ptrtoint`/`inttoptr` conversions. For compatibility,
- IR with integer pointers is still supported, but generates a deprecation warning. ([#53687])
-
-- A new exception `FieldError` is now introduced to raise/handle `getfield` exceptions. Previously `getfield` exception was captured by fallback generic exception `ErrorException`. Now that `FieldError` is more specific `getfield` related exceptions that can occur should use `FieldError` exception instead. ([#54504])
+ IR with integer pointers is still supported, but generates a deprecation warning ([#53687]).
Command-line option changes
---------------------------
* The `-m/--module` flag can be passed to run the `main` function inside a package with a set of arguments.
- This `main` function should be declared using `@main` to indicate that it is an entry point. ([#52103])
+ This `main` function should be declared using `@main` to indicate that it is an entry point ([#52103]).
* Enabling or disabling color text in Julia can now be controlled with the
[`NO_COLOR`](https://no-color.org/) or [`FORCE_COLOR`](https://force-color.org/) environment
variables. These variables are also honored by Julia's build system ([#53742], [#56346]).
-* `--project=@temp` starts Julia with a temporary environment. ([#51149])
+* `--project=@temp` starts Julia with a temporary environment ([#51149]).
* New `--trace-compile-timing` option to report how long each method reported by `--trace-compile` took
- to compile, in ms. ([#54662])
-* `--trace-compile` now prints recompiled methods in yellow or with a trailing comment if color is not supported ([#55763])
+ to compile, in ms ([#54662]).
+* `--trace-compile` now prints recompiled methods in yellow or with a trailing comment if color is not
+ supported ([#55763]).
* New `--trace-dispatch` option to report methods that are dynamically dispatched ([#55848]).
Multi-threading changes
@@ -90,45 +89,49 @@ Multi-threading changes
a `OncePerProcess{T}` type, which allows defining a function that should be run exactly once
the first time it is called, and then always return the same result value of type `T`
every subsequent time afterwards. There are also `OncePerThread{T}` and `OncePerTask{T}` types for
- similar usage with threads or tasks. ([#55793])
+ similar usage with threads or tasks ([#55793]).
Build system changes
--------------------
-* There are new `Makefile`s to build Julia and LLVM using the Binary Optimization and Layout Tool (BOLT), see `contrib/bolt` and `contrib/pgo-lto-bolt` ([#54107]).
+* There are new `Makefile`s to build Julia and LLVM using the Binary Optimization and Layout Tool (BOLT).
+ See `contrib/bolt` and `contrib/pgo-lto-bolt` ([#54107]).
New library functions
---------------------
-* `logrange(start, stop; length)` makes a range of constant ratio, instead of constant step ([#39071])
-* The new `isfull(c::Channel)` function can be used to check if `put!(c, some_value)` will block. ([#53159])
-* `waitany(tasks; throw=false)` and `waitall(tasks; failfast=false, throw=false)` which wait multiple tasks at once ([#53341]).
-* `uuid7()` creates an RFC 9652 compliant UUID with version 7 ([#54834]).
-* `insertdims(array; dims)` allows to insert singleton dimensions into an array which is the inverse operation to `dropdims`. ([#45793])
-* The new `Fix` type is a generalization of `Fix1/Fix2` for fixing a single argument ([#54653]).
-* `Sys.detectwsl()` allows to testing if Julia is running inside WSL at runtime. ([#57069])
+* `logrange(start, stop; length)` makes a range of constant ratio, instead of constant step ([#39071]).
+* The new `isfull(c::Channel)` function can be used to check if `put!(c, some_value)` will block ([#53159]).
+* `waitany(tasks; throw=false)` and `waitall(tasks; failfast=false, throw=false)` which wait for multiple tasks
+ at once ([#53341]).
+* `uuid7()` creates an RFC 9562 compliant UUID with version 7 ([#54834]).
+* `insertdims(array; dims)` inserts singleton dimensions into an array --- the inverse operation of
+ `dropdims` ([#45793]).
+* A new `Fix` type generalizes `Fix1/Fix2` for fixing a single argument ([#54653]).
+* `Sys.detectwsl()` tests whether Julia is running inside WSL at runtime ([#57069]).
New library features
--------------------
-* `escape_string` takes additional keyword arguments `ascii=true` (to escape all
- non-ASCII characters) and `fullhex=true` (to require full 4/8-digit hex numbers
- for u/U escapes, e.g. for C compatibility) ([#55099]).
+* `escape_string` takes additional keyword arguments `ascii=true` (to escape all non-ASCII characters) and
+ `fullhex=true` (to require full 4/8-digit hex numbers for u/U escapes, e.g. for C compatibility) ([#55099]).
* `tempname` can now take a suffix string to allow the file name to include a suffix and include that suffix in
- the uniquing checking ([#53474])
-* `RegexMatch` objects can now be used to construct `NamedTuple`s and `Dict`s ([#50988])
-* `Lockable` is now exported ([#54595])
-* `Base.require_one_based_indexing` and `Base.has_offset_axes` are now public ([#56196])
-* New `ltruncate`, `rtruncate` and `ctruncate` functions for truncating strings to text width, accounting for char widths ([#55351])
-* `isless` (and thus `cmp`, sorting, etc.) is now supported for zero-dimensional `AbstractArray`s ([#55772])
-* `invoke` now supports passing a Method instead of a type signature making this interface somewhat more flexible for certain uncommon use cases ([#56692]).
-* `Timer(f, ...)` will now match the stickiness of the parent task when creating timer tasks, which can be overridden
- by the new `spawn` kwarg. This avoids the issue where sticky tasks i.e. `@async` make their parent sticky ([#56745])
-* `invoke` now supports passing a CodeInstance instead of a type, which can enable
-certain compiler plugin workflows ([#56660]).
-* `sort` now supports `NTuple`s ([#54494])
-* `map!(f, A)` now stores the results in `A`, like `map!(f, A, A)`. or `A .= f.(A)` ([#40632]).
-* `Timer` now has readable `timeout` and `interval` properties, and a more descriptive show method ([#57081])
+ the uniquing checking ([#53474]).
+* `RegexMatch` objects can now be used to construct `NamedTuple`s and `Dict`s ([#50988]).
+* `Lockable` is now exported ([#54595]).
+* `Base.require_one_based_indexing` and `Base.has_offset_axes` are now public ([#56196]).
+* New `ltruncate`, `rtruncate` and `ctruncate` functions for truncating strings to text width, accounting for
+ char widths ([#55351]).
+* `isless` (and thus `cmp`, sorting, etc.) is now supported for zero-dimensional `AbstractArray`s ([#55772]).
+* `invoke` now supports passing a `Method` instead of a type signature ([#56692]).
+* `invoke` now supports passing a `CodeInstance` instead of a type, which can enable certain compiler plugin
+ workflows ([#56660]).
+* `Timer(f, ...)` will now match the stickiness of the parent task when creating timer tasks, which can be
+ overridden by the new `spawn` keyword argument. This avoids the issue where sticky tasks (i.e. `@async`)
+ make their parent sticky ([#56745]).
+* `Timer` now has readable `timeout` and `interval` properties, and a more descriptive `show` method ([#57081]).
+* `sort` now supports `NTuple`s ([#54494]).
+* `map!(f, A)` now stores the results in `A`, like `map!(f, A, A)` or `A .= f.(A)` ([#40632]).
Standard library changes
------------------------
@@ -136,27 +139,23 @@ Standard library changes
* `gcdx(0, 0)` now returns `(0, 0, 0)` instead of `(0, 1, 0)` ([#40989]).
* `fd` returns a `RawFD` instead of an `Int` ([#55080]).
-#### StyledStrings
-
#### JuliaSyntaxHighlighting
-* A new standard library for applying syntax highlighting to Julia code, this
- uses `JuliaSyntax` and `StyledStrings` to implement a `highlight` function
- that creates an `AnnotatedString` with syntax highlighting applied. ([#51810])
-
-#### Package Manager
+* A new standard library for applying syntax highlighting to Julia code; it uses `JuliaSyntax` and
+ `StyledStrings` to implement a `highlight` function that creates an `AnnotatedString` with syntax highlighting
+ applied ([#51810]).
#### LinearAlgebra
* `rank` can now take a `QRPivoted` matrix to allow rank estimation via QR factorization ([#54283]).
-* Added keyword argument `alg` to `eigen`, `eigen!`, `eigvals` and `eigvals!` for self-adjoint
- matrix types (i.e., the type union `RealHermSymComplexHerm`) that allows one to switch
- between different eigendecomposition algorithms ([#49355]).
-* Added a generic version of the (unblocked) pivoted Cholesky decomposition
- (callable via `cholesky[!](A, RowMaximum())`) ([#54619]).
-* The number of default BLAS threads now respects process affinity, instead of
- using total number of logical threads available on the system ([#55574]).
-* A new function `zeroslike` is added that is used to generate the zero elements for matrix-valued banded matrices.
+* Added keyword argument `alg` to `eigen`, `eigen!`, `eigvals` and `eigvals!` for self-adjoint matrix types
+ (i.e., the type union `RealHermSymComplexHerm`) that allows one to switch between different eigendecomposition
+ algorithms ([#49355]).
+* Added a generic version of the (unblocked) pivoted Cholesky decomposition (callable via
+ `cholesky[!](A, RowMaximum())`) ([#54619]).
+* The number of default BLAS threads now respects process affinity, instead of using the total number of logical
+ threads available on the system ([#55574]).
+* A new function `zeroslike` is added that generates the zero elements for matrix-valued banded matrices.
Custom array types may specialize this function to return an appropriate result ([#55252]).
* The matrix multiplication `A * B` calls `matprod_dest(A, B, T::Type)` to generate the destination.
This function is now public ([#55537]).
@@ -164,41 +163,33 @@ Standard library changes
This is now public ([#56223]).
* A new function `diagview` is added that returns a view into a specific band of an `AbstractMatrix` ([#56175]).
-#### Logging
-
-#### Printf
-
#### Profile
-* `Profile.take_heap_snapshot` takes a new keyword argument, `redact_data::Bool`,
- that is `true` by default. When set, the contents of Julia objects are not emitted
- in the heap snapshot. This currently only applies to strings. ([#55326])
+* `Profile.take_heap_snapshot` takes a new keyword argument, `redact_data::Bool`, which is `true` by default.
+ When set, the contents of Julia objects are not emitted in the heap snapshot. This currently only applies to
+ strings ([#55326]).
* `Profile.print()` now colors Base/Core/Package modules similarly to how they are in stacktraces.
Also paths, even if truncated, are now clickable in terminals that support URI links
- to take you to the specified `JULIA_EDITOR` for the given file & line number. ([#55335])
-
-#### Random
+ to take you to the specified `JULIA_EDITOR` for the given file & line number ([#55335]).
#### REPL
-- Using the new `usings=true` feature of the `names()` function, REPL completions can now
- complete names that have been explicitly `using`-ed. ([#54610])
-- REPL completions can now complete input lines like `[import|using] Mod: xxx|` e.g.
- complete `using Base.Experimental: @op` to `using Base.Experimental: @opaque`. ([#54719])
-- the REPL will now warn if it detects a name is being accessed from a module which does not define it (nor has a submodule which defines it),
- and for which the name is not public in that module. For example, `map` is defined in Base, and executing `LinearAlgebra.map`
- in the REPL will now issue a warning the first time occurs. ([#54872])
-- When an object is printed automatically (by being returned in the REPL), its display is now truncated after printing 20 KiB.
- This does not affect manual calls to `show`, `print`, and so forth. ([#53959])
-- Backslash completions now print the respective glyph or emoji next to each matching backslash shortcode. ([#54800])
-
-#### SuiteSparse
-
-#### SparseArrays
+* Using the new `usings=true` feature of the `names()` function, REPL completions can now
+ complete names visible via `using` ([#54610]).
+* REPL completions can now complete input lines like `[import|using] Mod: xxx|` e.g.
+ complete `using Base.Experimental: @op` to `using Base.Experimental: @opaque` ([#54719]).
+* The REPL will now warn if it detects a name is being accessed via a module which does not define it (nor has
+ a submodule which defines it), and for which the name is not public in that module. For example, `map` is
+ defined in Base, and executing `LinearAlgebra.map` in the REPL will now issue a warning the first time it
+ occurs ([#54872]).
+* When the result of a REPL input is printed, the output is now truncated to 20 KiB.
+ This does not affect manual calls to `show`, `print`, etc. ([#53959]).
+* Backslash completions now print the respective glyph or emoji next to each matching backslash shortcode ([#54800]).
#### Test
-* A failing `DefaultTestSet` now prints to screen the random number generator (RNG) of the failed test, to help reproducing a stochastic failure which only depends on the state of the RNG.
+* A failing `DefaultTestSet` now prints to screen the random number generator (RNG) of the failed test, to help
+ reproduce a stochastic failure which only depends on the state of the RNG.
It is also possible seed a test set by passing the `rng` keyword argument to `@testset`:
```julia
using Test, Random
@@ -207,35 +198,86 @@ Standard library changes
end
```
-#### Dates
-
-#### Statistics
-
-#### Distributed
-
-#### Unicode
-
-#### DelimitedFiles
-
#### InteractiveUtils
* New macros `@trace_compile` and `@trace_dispatch` for running an expression with
- `--trace-compile=stderr --trace-compile-timing` and `--trace-dispatch=stderr` respectively enabled.
- ([#55915])
-
-Deprecated or removed
----------------------
+ `--trace-compile=stderr --trace-compile-timing` and `--trace-dispatch=stderr` respectively enabled ([#55915]).
External dependencies
---------------------
-- The terminal info database, `terminfo`, is now vendored by default, providing a better
+* The terminal info database, `terminfo`, is now vendored by default, providing a better
REPL user experience when `terminfo` is not available on the system. Julia can be built
- without vendoring the database using the Makefile option `WITH_TERMINFO=0`. ([#55411])
+ without vendoring the database using the Makefile option `WITH_TERMINFO=0` ([#55411]).
Tooling Improvements
--------------------
-- A wall-time profiler is now available for users who need a sampling profiler that captures tasks regardless of their scheduling or running state. This type of profiler enables profiling of I/O-heavy tasks and helps detect areas of heavy contention in the system ([#55889]).
+* A wall-time profiler is now available for users who need a sampling profiler that captures tasks regardless
+ of their scheduling or running state. This type of profiler enables profiling of I/O-heavy tasks and helps
+ detect areas of heavy contention in the system ([#55889]).
+[#39071]: https://github.com/JuliaLang/julia/issues/39071
+[#40632]: https://github.com/JuliaLang/julia/issues/40632
+[#40989]: https://github.com/JuliaLang/julia/issues/40989
+[#45793]: https://github.com/JuliaLang/julia/issues/45793
+[#49355]: https://github.com/JuliaLang/julia/issues/49355
+[#50988]: https://github.com/JuliaLang/julia/issues/50988
+[#51149]: https://github.com/JuliaLang/julia/issues/51149
+[#51810]: https://github.com/JuliaLang/julia/issues/51810
+[#52103]: https://github.com/JuliaLang/julia/issues/52103
+[#52999]: https://github.com/JuliaLang/julia/issues/52999
+[#53159]: https://github.com/JuliaLang/julia/issues/53159
+[#53341]: https://github.com/JuliaLang/julia/issues/53341
+[#53415]: https://github.com/JuliaLang/julia/issues/53415
+[#53474]: https://github.com/JuliaLang/julia/issues/53474
+[#53515]: https://github.com/JuliaLang/julia/issues/53515
+[#53664]: https://github.com/JuliaLang/julia/issues/53664
+[#53687]: https://github.com/JuliaLang/julia/issues/53687
+[#53742]: https://github.com/JuliaLang/julia/issues/53742
+[#53959]: https://github.com/JuliaLang/julia/issues/53959
+[#54107]: https://github.com/JuliaLang/julia/issues/54107
+[#54283]: https://github.com/JuliaLang/julia/issues/54283
+[#54494]: https://github.com/JuliaLang/julia/issues/54494
+[#54504]: https://github.com/JuliaLang/julia/issues/54504
+[#54595]: https://github.com/JuliaLang/julia/issues/54595
+[#54609]: https://github.com/JuliaLang/julia/issues/54609
+[#54610]: https://github.com/JuliaLang/julia/issues/54610
+[#54619]: https://github.com/JuliaLang/julia/issues/54619
+[#54653]: https://github.com/JuliaLang/julia/issues/54653
+[#54662]: https://github.com/JuliaLang/julia/issues/54662
+[#54707]: https://github.com/JuliaLang/julia/issues/54707
+[#54719]: https://github.com/JuliaLang/julia/issues/54719
+[#54800]: https://github.com/JuliaLang/julia/issues/54800
+[#54834]: https://github.com/JuliaLang/julia/issues/54834
+[#54872]: https://github.com/JuliaLang/julia/issues/54872
+[#55047]: https://github.com/JuliaLang/julia/issues/55047
+[#55080]: https://github.com/JuliaLang/julia/issues/55080
+[#55099]: https://github.com/JuliaLang/julia/issues/55099
+[#55252]: https://github.com/JuliaLang/julia/issues/55252
+[#55326]: https://github.com/JuliaLang/julia/issues/55326
+[#55335]: https://github.com/JuliaLang/julia/issues/55335
+[#55351]: https://github.com/JuliaLang/julia/issues/55351
+[#55411]: https://github.com/JuliaLang/julia/issues/55411
+[#55537]: https://github.com/JuliaLang/julia/issues/55537
+[#55574]: https://github.com/JuliaLang/julia/issues/55574
+[#55763]: https://github.com/JuliaLang/julia/issues/55763
+[#55772]: https://github.com/JuliaLang/julia/issues/55772
+[#55793]: https://github.com/JuliaLang/julia/issues/55793
+[#55848]: https://github.com/JuliaLang/julia/issues/55848
+[#55889]: https://github.com/JuliaLang/julia/issues/55889
+[#55915]: https://github.com/JuliaLang/julia/issues/55915
+[#56175]: https://github.com/JuliaLang/julia/issues/56175
+[#56196]: https://github.com/JuliaLang/julia/issues/56196
+[#56223]: https://github.com/JuliaLang/julia/issues/56223
+[#56320]: https://github.com/JuliaLang/julia/issues/56320
+[#56346]: https://github.com/JuliaLang/julia/issues/56346
+[#56660]: https://github.com/JuliaLang/julia/issues/56660
+[#56692]: https://github.com/JuliaLang/julia/issues/56692
+[#56745]: https://github.com/JuliaLang/julia/issues/56745
+[#56925]: https://github.com/JuliaLang/julia/issues/56925
+[#57069]: https://github.com/JuliaLang/julia/issues/57069
+[#57081]: https://github.com/JuliaLang/julia/issues/57081
+[#57087]: https://github.com/JuliaLang/julia/issues/57087
+[#57109]: https://github.com/JuliaLang/julia/issues/57109
diff --git a/README.md b/README.md
index 021322336d286..0ed6ed9117434 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ and installing Julia, below.
## Resources
- **Homepage:**
-- **Binaries:**
+- **Install:**
- **Source code:**
- **Documentation:**
- **Packages:**
@@ -63,17 +63,22 @@ helpful to start contributing to the Julia codebase.
## Binary Installation
-If you would rather not compile the latest Julia from source,
-platform-specific tarballs with pre-compiled binaries are also
-[available for download](https://julialang.org/downloads/). The
-downloads page also provides details on the
-[different tiers of support](https://julialang.org/downloads/#supported_platforms)
-for OS and platform combinations.
-
-If everything works correctly, you will see a Julia banner and an
-interactive prompt into which you can enter expressions for
-evaluation. You can read about [getting
-started](https://docs.julialang.org/en/v1/manual/getting-started/) in the manual.
+The recommended way of installing Julia is to use `juliaup` which will install
+the latest stable `julia` for you and help keep it up to date. It can also let
+you install and run different Julia versions simultaneously. Instructions for
+this can be found [here](https://julialang.org/install/). If you want to manually
+download specific Julia binaries, you can find those on the [downloads
+page](https://julialang.org/downloads/). The downloads page also provides
+details on the [different tiers of
+support](https://julialang.org/downloads/#supported_platforms) for OS and
+platform combinations.
+
+If everything works correctly, you will get a `julia` program and when you run
+it in a terminal or command prompt, you will see a Julia banner and an
+interactive prompt into which you can enter expressions for evaluation. You can
+read about [getting
+started](https://docs.julialang.org/en/v1/manual/getting-started/) in the
+manual.
**Note**: Although some OS package managers provide Julia, such
installations are neither maintained nor endorsed by the Julia
diff --git a/VERSION b/VERSION
index f6c25a3020b69..29c70926ea7d9 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.12.0-DEV
+1.12.0-beta3+RAI
diff --git a/base/Base.jl b/base/Base.jl
index fde41a5f19f37..c6e33df751782 100644
--- a/base/Base.jl
+++ b/base/Base.jl
@@ -21,7 +21,6 @@ include(strcat(BUILDROOT, "version_git.jl")) # include($BUILDROOT/base/version_g
# Initialize DL_LOAD_PATH as early as possible. We are defining things here in
# a slightly more verbose fashion than usual, because we're running so early.
-const DL_LOAD_PATH = String[]
let os = ccall(:jl_get_UNAME, Any, ())
if os === :Darwin || os === :Apple
if Base.DARWIN_FRAMEWORK
@@ -37,9 +36,7 @@ include("views.jl")
# numeric operations
include("hashing.jl")
-include("rounding.jl")
include("div.jl")
-include("float.jl")
include("twiceprecision.jl")
include("complex.jl")
include("rational.jl")
@@ -107,6 +104,9 @@ include("strings/strings.jl")
include("regex.jl")
include("parse.jl")
include("shell.jl")
+const IRShow = Compiler.IRShow # an alias for compatibility
+include("stacktraces.jl")
+using .StackTraces
include("show.jl")
include("arrayshow.jl")
include("methodshow.jl")
@@ -243,10 +243,6 @@ include("irrationals.jl")
include("mathconstants.jl")
using .MathConstants: ℯ, π, pi
-# Stack frames and traces
-include("stacktraces.jl")
-using .StackTraces
-
# experimental API's
include("experimental.jl")
diff --git a/base/Base_compiler.jl b/base/Base_compiler.jl
index 4ec6bae171d8f..702a00c8b8b07 100644
--- a/base/Base_compiler.jl
+++ b/base/Base_compiler.jl
@@ -1,8 +1,8 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
-baremodule Base
+module Base
-using Core.Intrinsics, Core.IR
+using .Core.Intrinsics, .Core.IR
# to start, we're going to use a very simple definition of `include`
# that doesn't require any function (except what we can get from the `Core` top-module)
@@ -135,6 +135,9 @@ include("coreio.jl")
import Core: @doc, @__doc__, WrappedException, @int128_str, @uint128_str, @big_str, @cmd
+# Export list
+include("exports.jl")
+
# core docsystem
include("docs/core.jl")
Core.atdoc!(CoreDocs.docm)
@@ -142,7 +145,6 @@ Core.atdoc!(CoreDocs.docm)
eval(x) = Core.eval(Base, x)
eval(m::Module, x) = Core.eval(m, x)
-include("exports.jl")
include("public.jl")
if false
@@ -155,11 +157,40 @@ if false
println(io::IO, x...) = Core.println(io, x...)
end
+## Load essential files and libraries
+include("essentials.jl")
+
+# Because lowering inserts direct references, it is mandatory for this binding
+# to exist before we start inferring code.
+function string end
+import Core: String
+
+# For OS specific stuff
+# We need to strcat things here, before strings are really defined
+function strcat(x::String, y::String)
+ out = ccall(:jl_alloc_string, Ref{String}, (Int,), Core.sizeof(x) + Core.sizeof(y))
+ gc_x = @_gc_preserve_begin(x)
+ gc_y = @_gc_preserve_begin(y)
+ gc_out = @_gc_preserve_begin(out)
+ out_ptr = unsafe_convert(Ptr{UInt8}, out)
+ unsafe_copyto!(out_ptr, unsafe_convert(Ptr{UInt8}, x), Core.sizeof(x))
+ unsafe_copyto!(out_ptr + Core.sizeof(x), unsafe_convert(Ptr{UInt8}, y), Core.sizeof(y))
+ @_gc_preserve_end(gc_x)
+ @_gc_preserve_end(gc_y)
+ @_gc_preserve_end(gc_out)
+ return out
+end
+
+
"""
time_ns() -> UInt64
-Get the time in nanoseconds relative to some arbitrary time in the past. The primary use is for measuring the elapsed time
-between two moments in time.
+Get the time in nanoseconds relative to some machine-specific arbitrary time in the past.
+The primary use is for measuring elapsed times during program execution. The return value is guaranteed to
+be monotonic (mod 2⁶⁴) while the system is running, and is unaffected by clock drift or changes to local calendar time,
+but it may change arbitrarily across system reboots or suspensions.
+
+(Although the returned time is always in nanoseconds, the timing resolution is platform-dependent.)
"""
time_ns() = ccall(:jl_hrtime, UInt64, ())
@@ -169,8 +200,6 @@ const _DOCS_ALIASING_WARNING = """
Behavior can be unexpected when any mutated argument shares memory with any other argument.
"""
-## Load essential files and libraries
-include("essentials.jl")
include("ctypes.jl")
include("gcutils.jl")
include("generator.jl")
@@ -199,6 +228,63 @@ function Core._hasmethod(@nospecialize(f), @nospecialize(t)) # this function has
return Core._hasmethod(tt)
end
+"""
+ invokelatest(f, args...; kwargs...)
+
+Calls `f(args...; kwargs...)`, but guarantees that the most recent method of `f`
+will be executed. This is useful in specialized circumstances,
+e.g. long-running event loops or callback functions that may
+call obsolete versions of a function `f`.
+(The drawback is that `invokelatest` is somewhat slower than calling
+`f` directly, and the type of the result cannot be inferred by the compiler.)
+
+!!! compat "Julia 1.9"
+ Prior to Julia 1.9, this function was not exported, and was called as `Base.invokelatest`.
+"""
+const invokelatest = Core.invokelatest
+
+# define invokelatest(f, args...; kwargs...), without kwargs wrapping
+# to forward to invokelatest
+function Core.kwcall(kwargs::NamedTuple, ::typeof(invokelatest), f, args...)
+ @inline
+ return Core.invokelatest(Core.kwcall, kwargs, f, args...)
+end
+setfield!(typeof(invokelatest).name.mt, :max_args, 2, :monotonic) # invokelatest, f, args...
+
+"""
+ invoke_in_world(world, f, args...; kwargs...)
+
+Call `f(args...; kwargs...)` in a fixed world age, `world`.
+
+This is useful for infrastructure running in the user's Julia session which is
+not part of the user's program. For example, things related to the REPL, editor
+support libraries, etc. In these cases it can be useful to prevent unwanted
+method invalidation and recompilation latency, and to prevent the user from
+breaking supporting infrastructure by mistake.
+
+The global world age can be queried using [`Base.get_world_counter()`](@ref)
+and stored for later use within the lifetime of the current Julia session, or
+when serializing and reloading the system image.
+
+Technically, `invoke_in_world` will prevent any function called by `f` from
+being extended by the user during their Julia session. That is, generic
+function method tables seen by `f` (and any functions it calls) will be frozen
+as they existed at the given `world` age. In a sense, this is like the opposite
+of [`invokelatest`](@ref).
+
+!!! note
+ It is not valid to store world ages obtained in precompilation for later use.
+ This is because precompilation generates a "parallel universe" where the
+ world age refers to system state unrelated to the main Julia session.
+"""
+const invoke_in_world = Core.invoke_in_world
+
+function Core.kwcall(kwargs::NamedTuple, ::typeof(invoke_in_world), world::UInt, f, args...)
+ @inline
+ return Core.invoke_in_world(world, Core.kwcall, kwargs, f, args...)
+end
+setfield!(typeof(invoke_in_world).name.mt, :max_args, 3, :monotonic) # invoke_in_world, world, f, args...
+
# core operations & types
include("promotion.jl")
include("tuple.jl")
@@ -218,14 +304,19 @@ include("pointer.jl")
include("refvalue.jl")
include("cmem.jl")
-include("checked.jl")
-using .Checked
-function cld end
-function fld end
+function nextfloat end
+function prevfloat end
+include("rounding.jl")
+include("float.jl")
# Lazy strings
include("strings/lazy.jl")
+function cld end
+function fld end
+include("checked.jl")
+using .Checked
+
# array structures
include("indices.jl")
include("genericmemory.jl")
@@ -255,24 +346,9 @@ using .Order
include("coreir.jl")
include("invalidation.jl")
-# Because lowering inserts direct references, it is mandatory for this binding
-# to exist before we start inferring code.
-function string end
-
-# For OS specific stuff
-# We need to strcat things here, before strings are really defined
-function strcat(x::String, y::String)
- out = ccall(:jl_alloc_string, Ref{String}, (Csize_t,), Core.sizeof(x) + Core.sizeof(y))
- GC.@preserve x y out begin
- out_ptr = unsafe_convert(Ptr{UInt8}, out)
- unsafe_copyto!(out_ptr, unsafe_convert(Ptr{UInt8}, x), Core.sizeof(x))
- unsafe_copyto!(out_ptr + Core.sizeof(x), unsafe_convert(Ptr{UInt8}, y), Core.sizeof(y))
- end
- return out
-end
-
BUILDROOT::String = ""
DATAROOT::String = ""
+const DL_LOAD_PATH = String[]
baremodule BuildSettings end
@@ -308,4 +384,5 @@ Core._setparser!(fl_parse)
# Further definition of Base will happen in Base.jl if loaded.
-end # baremodule Base
+end # module Base
+using .Base
diff --git a/base/Makefile b/base/Makefile
index 09f79e5b98611..34791f7b4b0d4 100644
--- a/base/Makefile
+++ b/base/Makefile
@@ -18,9 +18,9 @@ else
endif
define parse_features
-@echo "# $(2) features" >> $@
+@printf "%s\n" "# $(2) features" >> $@
@$(call PRINT_PERL, cat $(SRCDIR)/../src/features_$(1).h | perl -lne 'print "const JL_$(2)_$$1 = UInt32($$2)" if /^\s*JL_FEATURE_DEF(?:_NAME)?\(\s*(\w+)\s*,\s*([^,]+)\s*,.*\)\s*(?:\/\/.*)?$$/' >> $@)
-@echo >> $@
+@printf "\n" >> $@
endef
$(BUILDDIR)/features_h.jl: $(SRCDIR)/../src/features_x86.h $(SRCDIR)/../src/features_aarch32.h $(SRCDIR)/../src/features_aarch64.h
@@ -33,7 +33,7 @@ $(BUILDDIR)/pcre_h.jl: $(PCRE_INCL_PATH)
@$(call PRINT_PERL, $(CPP) -D PCRE2_CODE_UNIT_WIDTH=8 -dM $< | perl -nle '/^\s*#define\s+PCRE2_(\w*)\s*\(?($(PCRE_CONST))\)?u?\s*$$/ and print index($$1, "ERROR_") == 0 ? "const $$1 = Cint($$2)" : "const $$1 = UInt32($$2)"' | LC_ALL=C sort > $@)
$(BUILDDIR)/errno_h.jl:
- @$(call PRINT_PERL, echo '#include ' | $(CPP) -dM - | perl -nle 'print "const $$1 = Int32($$2)" if /^#define\s+(E\w+)\s+(\d+)\s*$$/' | LC_ALL=C sort > $@)
+ @$(call PRINT_PERL, printf "%s\n" '#include ' | $(CPP) -dM - | perl -nle 'print "const $$1 = Int32($$2)" if /^#define\s+(E\w+)\s+(\d+)\s*$$/' | LC_ALL=C sort > $@)
$(BUILDDIR)/file_constants.jl: $(SRCDIR)/../src/file_constants.h
@$(call PRINT_PERL, $(CPP_STDOUT) -DJULIA $< | perl -nle 'print "$$1 0o$$2" if /^(\s*const\s+[A-z_]+\s+=)\s+(0[0-9]*)\s*$$/; print "$$1" if /^\s*(const\s+[A-z_]+\s+=\s+([1-9]|0x)[0-9A-z]*)\s*$$/' > $@)
@@ -42,57 +42,46 @@ $(BUILDDIR)/uv_constants.jl: $(SRCDIR)/../src/uv_constants.h $(LIBUV_INC)/uv/err
@$(call PRINT_PERL, $(CPP_STDOUT) "-I$(LIBUV_INC)" -DJULIA $< | tail -n 16 > $@)
$(BUILDDIR)/build_h.jl.phony:
- @echo "# This file is automatically generated in base/Makefile" > $@
+ @printf "%s\n" "# This file is automatically generated in base/Makefile" > $@
ifeq ($(XC_HOST),)
- @echo "const MACHINE = \"$(BUILD_MACHINE)\"" >> $@
+ @printf "%s\n" "const MACHINE = \"$(BUILD_MACHINE)\"" >> $@
else
- @echo "const MACHINE = \"$(XC_HOST)\"" >> $@
+ @printf "%s\n" "const MACHINE = \"$(XC_HOST)\"" >> $@
endif
- @echo "const libm_name = \"$(LIBMNAME)\"" >> $@
+ @printf "%s\n" "const libm_name = \"$(LIBMNAME)\"" >> $@
ifeq ($(USE_BLAS64), 1)
- @echo "const USE_BLAS64 = true" >> $@
+ @printf "%s\n" "const USE_BLAS64 = true" >> $@
else
- @echo "const USE_BLAS64 = false" >> $@
+ @printf "%s\n" "const USE_BLAS64 = false" >> $@
endif
ifeq ($(USE_GPL_LIBS), 1)
- @echo "const USE_GPL_LIBS = true" >> $@
+ @printf "%s\n" "const USE_GPL_LIBS = true" >> $@
else
- @echo "const USE_GPL_LIBS = false" >> $@
-endif
- @echo "const libllvm_version_string = \"$$($(LLVM_CONFIG_HOST) --version)\"" >> $@
- @echo "const libllvm_name = \"$(LLVM_SHARED_LIB_NAME)\"" >> $@
- @echo "const VERSION_STRING = \"$(JULIA_VERSION)\"" >> $@
- @echo "const TAGGED_RELEASE_BANNER = \"$(TAGGED_RELEASE_BANNER)\"" >> $@
-ifeq ($(OS),WINNT)
- @printf 'const SYSCONFDIR = "%s"\n' '$(subst /,\\,$(sysconfdir_rel))' >> $@
- @printf 'const DATAROOTDIR = "%s"\n' '$(subst /,\\,$(datarootdir_rel))' >> $@
- @printf 'const DOCDIR = "%s"\n' '$(subst /,\\,$(docdir_rel))' >> $@
- @printf 'const LIBDIR = "%s"\n' '$(subst /,\\,$(libdir_rel))' >> $@
- @printf 'const LIBEXECDIR = "%s"\n' '$(subst /,\\,$(libexecdir_rel))' >> $@
- @printf 'const PRIVATE_LIBDIR = "%s"\n' '$(subst /,\\,$(private_libdir_rel))' >> $@
- @printf 'const PRIVATE_LIBEXECDIR = "%s"\n' '$(subst /,\\,$(private_libexecdir_rel))' >> $@
- @printf 'const INCLUDEDIR = "%s"\n' '$(subst /,\\,$(includedir_rel))' >> $@
-else
- @echo "const SYSCONFDIR = \"$(sysconfdir_rel)\"" >> $@
- @echo "const DATAROOTDIR = \"$(datarootdir_rel)\"" >> $@
- @echo "const DOCDIR = \"$(docdir_rel)\"" >> $@
- @echo "const LIBDIR = \"$(libdir_rel)\"" >> $@
- @echo "const LIBEXECDIR = \"$(libexecdir_rel)\"" >> $@
- @echo "const PRIVATE_LIBDIR = \"$(private_libdir_rel)\"" >> $@
- @echo "const PRIVATE_LIBEXECDIR = \"$(private_libexecdir_rel)\"" >> $@
- @echo "const INCLUDEDIR = \"$(includedir_rel)\"" >> $@
+ @printf "%s\n" "const USE_GPL_LIBS = false" >> $@
endif
+ @printf "%s\n" "const libllvm_version_string = \"$$($(LLVM_CONFIG_HOST) --version)\"" >> $@
+ @printf "%s\n" "const libllvm_name = \"$(LLVM_SHARED_LIB_NAME)\"" >> $@
+ @printf "%s\n" "const VERSION_STRING = \"$(JULIA_VERSION)\"" >> $@
+ @printf "%s\n" "const TAGGED_RELEASE_BANNER = \"$(TAGGED_RELEASE_BANNER)\"" >> $@
+ @printf "%s\n" "const SYSCONFDIR = "$(call shell_escape,$(call julia_escape,$(call normalize_path,$(sysconfdir_rel)))) >> $@
+ @printf "%s\n" "const DATAROOTDIR = "$(call shell_escape,$(call julia_escape,$(call normalize_path,$(datarootdir_rel)))) >> $@
+ @printf "%s\n" "const DOCDIR = "$(call shell_escape,$(call julia_escape,$(call normalize_path,$(docdir_rel)))) >> $@
+ @printf "%s\n" "const LIBDIR = "$(call shell_escape,$(call julia_escape,$(call normalize_path,$(libdir_rel)))) >> $@
+ @printf "%s\n" "const LIBEXECDIR = "$(call shell_escape,$(call julia_escape,$(call normalize_path,$(libexecdir_rel)))) >> $@
+ @printf "%s\n" "const PRIVATE_LIBDIR = "$(call shell_escape,$(call julia_escape,$(call normalize_path,$(private_libdir_rel)))) >> $@
+ @printf "%s\n" "const PRIVATE_LIBEXECDIR = "$(call shell_escape,$(call julia_escape,$(call normalize_path,$(private_libexecdir_rel)))) >> $@
+ @printf "%s\n" "const INCLUDEDIR = "$(call shell_escape,$(call julia_escape,$(call normalize_path,$(includedir_rel)))) >> $@
ifeq ($(DARWIN_FRAMEWORK), 1)
- @echo "const DARWIN_FRAMEWORK = true" >> $@
- @echo "const DARWIN_FRAMEWORK_NAME = \"$(FRAMEWORK_NAME)\"" >> $@
+ @printf "%s\n" "const DARWIN_FRAMEWORK = true" >> $@
+ @printf "%s\n" "const DARWIN_FRAMEWORK_NAME = \"$(FRAMEWORK_NAME)\"" >> $@
else
- @echo "const DARWIN_FRAMEWORK = false" >> $@
+ @printf "%s\n" "const DARWIN_FRAMEWORK = false" >> $@
endif
ifeq ($(OS), Darwin)
- @echo "const MACOS_PRODUCT_VERSION = \"$(shell sw_vers -productVersion)\"" >> $@
- @echo "const MACOS_PLATFORM_VERSION = \"$(shell xcrun --show-sdk-version)\"" >> $@
+ @printf "%s\n" "const MACOS_PRODUCT_VERSION = \"$(shell sw_vers -productVersion)\"" >> $@
+ @printf "%s\n" "const MACOS_PLATFORM_VERSION = \"$(shell xcrun --show-sdk-version)\"" >> $@
endif
- @echo "const BUILD_TRIPLET = \"$(BB_TRIPLET_LIBGFORTRAN_CXXABI)\"" >> $@
+ @printf "%s\n" "const BUILD_TRIPLET = \"$(BB_TRIPLET_LIBGFORTRAN_CXXABI)\"" >> $@
@# This to ensure that we always rebuild this file, but only when it is modified do we touch build_h.jl,
@# ensuring we rebuild the system image as infrequently as possible
@@ -115,10 +104,10 @@ ifneq ($(NO_GIT), 1)
rm -f $@; \
fi
else
-ifeq ($(shell [ -f $(BUILDDIR)/version_git.jl ] && echo "true"), true)
+ifeq ($(shell [ -f $(BUILDDIR)/version_git.jl ] && printf "true\n"), true)
@# Give warning if boilerplate git is used
@if grep -q "Default output if git is not available" $(BUILDDIR)/version_git.jl; then \
- echo "WARNING: Using boilerplate git version info" >&2; \
+ printf "WARNING: Using boilerplate git version info\n" >&2; \
fi
else
$(warning "WARNING: Generating boilerplate git version info")
@@ -141,7 +130,7 @@ resolve_path = \
if [ -n "$${$1_}" ]; then $1_wd=`dirname "$${$1}"`; $1="$${$1_}"; fi
## if it's a relative path, make it an absolute path
resolve_path += && \
- if [ -z "`echo $${$1} | grep '^/'`" ]; then $1=$${$1_wd}/$${$1}; fi
+ if [ -z "`printf "%s\n" "$${$1}" | grep '^/'`" ]; then $1=$${$1_wd}/$${$1}; fi
ifeq ($(OS), Darwin)
# try to use the install_name id instead (unless it is an @rpath or such)
# if it's a relative path, make it an absolute path using the working directory from $1,
@@ -150,7 +139,7 @@ resolve_path += && \
$1_=`otool -D $${$1} | tail -n +2 | sed -e 's/^@.*$$//'` && \
if [ -n "$${$1_}" ]; then \
$1_wd=`dirname "$${$1}"`; $1=$${$1_}; \
- if [ -z "`echo $${$1} | grep '^/'`" ]; then $1=$${$1_wd}/$${$1}; fi; \
+ if [ -z "`printf "%s\n" $${$1} | grep '^/'`" ]; then $1=$${$1_wd}/$${$1}; fi; \
fi
else
# try to use the SO_NAME (if the named file exists)
@@ -164,10 +153,10 @@ endif
## debug code: `make resolve-path P=`
#resolve_path += && \
-# echo "$${$1_wd} $${$1}"
+# printf "%s\n" "$${$1_wd} $${$1}"
#resolve-path:
# $(call resolve_path,P) && \
-# echo "$$P"
+# printf "%s\n" "$$P"
define symlink_system_library
libname_$2 := $$(notdir $(call versioned_libname,$2,$3))
@@ -179,11 +168,11 @@ $$(build_private_libdir)/$$(libname_$2):
$$(call resolve_path,REALPATH) && \
[ -e "$$$$REALPATH" ] && \
rm -f "$$@" && \
- echo ln -sf "$$$$REALPATH" "$$@" && \
+ printf "ln -sf %s %s\n" "$$$$REALPATH" "$$@" && \
ln -sf "$$$$REALPATH" "$$@"; \
else \
if [ "$4" != "ALLOW_FAILURE" ]; then \
- echo "System library symlink failure: Unable to locate $$(libname_$2) on your system!" >&2; \
+ printf "%s\n" "System library symlink failure: Unable to locate $$(libname_$2) on your system!" >&2; \
false; \
fi; \
fi
@@ -295,7 +284,7 @@ $(build_private_libdir)/libLLVM.$(SHLIB_EXT):
$(call resolve_path,REALPATH) && \
[ -e "$$REALPATH" ] && \
rm -f "$@" && \
- echo ln -sf "$$REALPATH" "$@" && \
+ printf "ln -sf %s %s\n" "$$REALPATH" "$@" && \
ln -sf "$$REALPATH" "$@"
ifneq ($(USE_SYSTEM_LLVM),0)
ifneq ($(USE_LLVM_SHLIB),0)
diff --git a/base/abstractarray.jl b/base/abstractarray.jl
index 1ab78a55c93b5..2632592ede7c1 100644
--- a/base/abstractarray.jl
+++ b/base/abstractarray.jl
@@ -3412,12 +3412,19 @@ function ith_all(i, as)
end
function map_n!(f::F, dest::AbstractArray, As) where F
- idxs1 = LinearIndices(As[1])
- @boundscheck LinearIndices(dest) == idxs1 && all(x -> LinearIndices(x) == idxs1, As)
- for i = idxs1
- @inbounds I = ith_all(i, As)
- val = f(I...)
- @inbounds dest[i] = val
+ idxs = LinearIndices(dest)
+ if all(x -> LinearIndices(x) == idxs, As)
+ for i in idxs
+ @inbounds as = ith_all(i, As)
+ val = f(as...)
+ @inbounds dest[i] = val
+ end
+ else
+ for (i, Is...) in zip(eachindex(dest), map(eachindex, As)...)
+ as = ntuple(j->getindex(As[j], Is[j]), length(As))
+ val = f(as...)
+ dest[i] = val
+ end
end
return dest
end
@@ -3667,7 +3674,31 @@ function _keepat!(a::AbstractVector, m::AbstractVector{Bool})
deleteat!(a, j:lastindex(a))
end
-## 1-d circshift ##
+"""
+ circshift!(a::AbstractVector, shift::Integer)
+
+Circularly shift, or rotate, the data in vector `a` by `shift` positions.
+
+# Examples
+
+```jldoctest
+julia> circshift!([1, 2, 3, 4, 5], 2)
+5-element Vector{Int64}:
+ 4
+ 5
+ 1
+ 2
+ 3
+
+julia> circshift!([1, 2, 3, 4, 5], -2)
+5-element Vector{Int64}:
+ 3
+ 4
+ 5
+ 1
+ 2
+```
+"""
function circshift!(a::AbstractVector, shift::Integer)
n = length(a)
n == 0 && return a
diff --git a/base/accumulate.jl b/base/accumulate.jl
index 2748a4da481fa..c155ecfb4f75f 100644
--- a/base/accumulate.jl
+++ b/base/accumulate.jl
@@ -5,12 +5,14 @@
# it does double the number of operations compared to accumulate,
# though for cheap operations like + this does not have much impact (20%)
function _accumulate_pairwise!(op::Op, c::AbstractVector{T}, v::AbstractVector, s, i1, n)::T where {T,Op}
- @inbounds if n < 128
- s_ = v[i1]
- c[i1] = op(s, s_)
+ if n < 128
+ @inbounds s_ = v[i1]
+ ci1 = op(s, s_)
+ @inbounds c[i1] = ci1
for i = i1+1:i1+n-1
- s_ = op(s_, v[i])
- c[i] = op(s, s_)
+ s_ = op(s_, @inbounds(v[i]))
+ ci = op(s, s_)
+ @inbounds c[i] = ci
end
else
n2 = n >> 1
@@ -26,7 +28,8 @@ function accumulate_pairwise!(op::Op, result::AbstractVector, v::AbstractVector)
n = length(li)
n == 0 && return result
i1 = first(li)
- @inbounds result[i1] = v1 = reduce_first(op,v[i1])
+ v1 = reduce_first(op, @inbounds(v[i1]))
+ @inbounds result[i1] = v1
n == 1 && return result
_accumulate_pairwise!(op, result, v, v1, i1+1, n-1)
return result
@@ -378,16 +381,16 @@ function _accumulate!(op, B, A, dims::Integer, init::Union{Nothing, Some})
# We can accumulate to a temporary variable, which allows
# register usage and will be slightly faster
ind1 = inds_t[1]
- @inbounds for I in CartesianIndices(tail(inds_t))
+ for I in CartesianIndices(tail(inds_t))
if init === nothing
- tmp = reduce_first(op, A[first(ind1), I])
+ tmp = reduce_first(op, @inbounds(A[first(ind1), I]))
else
- tmp = op(something(init), A[first(ind1), I])
+ tmp = op(something(init), @inbounds(A[first(ind1), I]))
end
- B[first(ind1), I] = tmp
+ @inbounds B[first(ind1), I] = tmp
for i_1 = first(ind1)+1:last(ind1)
- tmp = op(tmp, A[i_1, I])
- B[i_1, I] = tmp
+ tmp = op(tmp, @inbounds(A[i_1, I]))
+ @inbounds B[i_1, I] = tmp
end
end
else
@@ -401,12 +404,15 @@ end
@noinline function _accumulaten!(op, B, A, R1, ind, R2, init::Nothing)
# Copy the initial element in each 1d vector along dimension `dim`
ii = first(ind)
- @inbounds for J in R2, I in R1
- B[I, ii, J] = reduce_first(op, A[I, ii, J])
+ for J in R2, I in R1
+ tmp = reduce_first(op, @inbounds(A[I, ii, J]))
+ @inbounds B[I, ii, J] = tmp
end
# Accumulate
- @inbounds for J in R2, i in first(ind)+1:last(ind), I in R1
- B[I, i, J] = op(B[I, i-1, J], A[I, i, J])
+ for J in R2, i in first(ind)+1:last(ind), I in R1
+ @inbounds Bv, Av = B[I, i-1, J], A[I, i, J]
+ tmp = op(Bv, Av)
+ @inbounds B[I, i, J] = tmp
end
B
end
@@ -414,12 +420,15 @@ end
@noinline function _accumulaten!(op, B, A, R1, ind, R2, init::Some)
# Copy the initial element in each 1d vector along dimension `dim`
ii = first(ind)
- @inbounds for J in R2, I in R1
- B[I, ii, J] = op(something(init), A[I, ii, J])
+ for J in R2, I in R1
+ tmp = op(something(init), @inbounds(A[I, ii, J]))
+ @inbounds B[I, ii, J] = tmp
end
# Accumulate
- @inbounds for J in R2, i in first(ind)+1:last(ind), I in R1
- B[I, i, J] = op(B[I, i-1, J], A[I, i, J])
+ for J in R2, i in first(ind)+1:last(ind), I in R1
+ @inbounds Bv, Av = B[I, i-1, J], A[I, i, J]
+ tmp = op(Bv, Av)
+ @inbounds B[I, i, J] = tmp
end
B
end
@@ -433,10 +442,10 @@ function _accumulate1!(op, B, v1, A::AbstractVector, dim::Integer)
cur_val = v1
B[i1] = cur_val
next = iterate(inds, state)
- @inbounds while next !== nothing
+ while next !== nothing
(i, state) = next
- cur_val = op(cur_val, A[i])
- B[i] = cur_val
+ cur_val = op(cur_val, @inbounds(A[i]))
+ @inbounds B[i] = cur_val
next = iterate(inds, state)
end
return B
diff --git a/base/array.jl b/base/array.jl
index aafcfc182124b..f134ad2bc9ea5 100644
--- a/base/array.jl
+++ b/base/array.jl
@@ -1348,7 +1348,7 @@ function append! end
function append!(a::Vector{T}, items::Union{AbstractVector{<:T},Tuple}) where T
items isa Tuple && (items = map(x -> convert(T, x), items))
- n = length(items)
+ n = Int(length(items))::Int
_growend!(a, n)
copyto!(a, length(a)-n+1, items, firstindex(items), n)
return a
@@ -1472,7 +1472,8 @@ julia> a[1:6]
1
```
"""
-function resize!(a::Vector, nl::Integer)
+function resize!(a::Vector, nl_::Integer)
+ nl = Int(nl_)::Int
l = length(a)
if nl > l
_growend!(a, nl-l)
diff --git a/base/arraymath.jl b/base/arraymath.jl
index 62dc3772e4938..53a7d132a2c0c 100644
--- a/base/arraymath.jl
+++ b/base/arraymath.jl
@@ -72,12 +72,12 @@ _reverse!(A::AbstractArray{<:Any,N}, ::Colon) where {N} = _reverse!(A, ntuple(id
_reverse!(A, dim::Integer) = _reverse!(A, (Int(dim),))
_reverse!(A, dims::NTuple{M,Integer}) where {M} = _reverse!(A, Int.(dims))
function _reverse!(A::AbstractArray{<:Any,N}, dims::NTuple{M,Int}) where {N,M}
+ dims === () && return A # nothing to reverse
dimrev = ntuple(k -> k in dims, Val{N}()) # boolean tuple indicating reversed dims
if N < M || M != sum(dimrev)
throw(ArgumentError("invalid dimensions $dims in reverse!"))
end
- M == 0 && return A # nothing to reverse
# swapping loop only needs to traverse ≈half of the array
halfsz = ntuple(k -> k == dims[1] ? size(A,k) ÷ 2 : size(A,k), Val{N}())
diff --git a/base/bool.jl b/base/bool.jl
index 3a5c36b09ae2c..3658318d158e5 100644
--- a/base/bool.jl
+++ b/base/bool.jl
@@ -1,5 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
+import Core: Bool
+
# promote Bool to any other numeric type
promote_rule(::Type{Bool}, ::Type{T}) where {T<:Number} = T
diff --git a/base/boot.jl b/base/boot.jl
index e50d74659d399..32975e96af583 100644
--- a/base/boot.jl
+++ b/base/boot.jl
@@ -229,7 +229,7 @@ export
Expr, QuoteNode, LineNumberNode, GlobalRef,
# object model functions
fieldtype, getfield, setfield!, swapfield!, modifyfield!, replacefield!, setfieldonce!,
- nfields, throw, tuple, ===, isdefined, eval,
+ nfields, throw, tuple, ===, isdefined,
# access to globals
getglobal, setglobal!, swapglobal!, modifyglobal!, replaceglobal!, setglobalonce!, isdefinedglobal,
# ifelse, sizeof # not exported, to avoid conflicting with Base
@@ -238,7 +238,9 @@ export
# method reflection
applicable, invoke,
# constants
- nothing, Main
+ nothing, Main,
+ # backwards compatibility
+ arrayref, arrayset, arraysize, const_arrayref
const getproperty = getfield # TODO: use `getglobal` for modules instead
const setproperty! = setfield!
@@ -473,6 +475,7 @@ struct ABIOverride
end
struct PrecompilableError <: Exception end
+struct TrimFailure <: Exception end
String(s::String) = s # no constructor yet
@@ -1012,8 +1015,11 @@ _parse = nothing
_setparser!(parser) = setglobal!(Core, :_parse, parser)
-# support for deprecated uses of internal _apply function
-_apply(x...) = Core._apply_iterate(Main.Base.iterate, x...)
+# support for deprecated uses of builtin functions
+_apply(x...) = _apply_iterate(Main.Base.iterate, x...)
+const _apply_pure = _apply
+const _call_latest = invokelatest
+const _call_in_world = invoke_in_world
struct Pair{A, B}
first::A
@@ -1040,7 +1046,6 @@ const_arrayref(inbounds::Bool, A::Array, i::Int...) = Main.Base.getindex(A, i...
arrayset(inbounds::Bool, A::Array{T}, x::Any, i::Int...) where {T} = Main.Base.setindex!(A, x::T, i...)
arraysize(a::Array) = a.size
arraysize(a::Array, i::Int) = sle_int(i, nfields(a.size)) ? getfield(a.size, i) : 1
-export arrayref, arrayset, arraysize, const_arrayref
const check_top_bit = check_sign_bit
# For convenience
diff --git a/base/c.jl b/base/c.jl
index c1b34579e0a0b..78c48f267ca71 100644
--- a/base/c.jl
+++ b/base/c.jl
@@ -203,11 +203,11 @@ function exit_on_sigint(on::Bool)
ccall(:jl_exit_on_sigint, Cvoid, (Cint,), on)
end
-function _ccallable(rt::Type, sigt::Type)
- ccall(:jl_extern_c, Cvoid, (Any, Any), rt, sigt)
+function _ccallable(name::Union{Nothing, String}, rt::Type, sigt::Type)
+ ccall(:jl_extern_c, Cvoid, (Any, Any, Any), name, rt, sigt)
end
-function expand_ccallable(rt, def)
+function expand_ccallable(name, rt, def)
if isa(def,Expr) && (def.head === :(=) || def.head === :function)
sig = def.args[1]
if sig.head === :(::)
@@ -235,7 +235,7 @@ function expand_ccallable(rt, def)
end
return quote
@__doc__ $(esc(def))
- _ccallable($(esc(rt)), $(Expr(:curly, :Tuple, esc(f), map(esc, at)...)))
+ _ccallable($name, $(esc(rt)), $(Expr(:curly, :Tuple, esc(f), map(esc, at)...)))
end
end
end
@@ -243,16 +243,22 @@ function expand_ccallable(rt, def)
end
"""
- @ccallable(def)
+ @ccallable ["name"] function f(...)::RetType ... end
Make the annotated function be callable from C using its name. This can, for example,
-be used to expose functionality as a C-API when creating a custom Julia sysimage.
+be used to expose functionality as a C API when creating a custom Julia sysimage.
+
+If the first argument is a string, it is used as the external name of the function.
"""
macro ccallable(def)
- expand_ccallable(nothing, def)
+ expand_ccallable(nothing, nothing, def)
end
macro ccallable(rt, def)
- expand_ccallable(rt, def)
+ if rt isa String
+ expand_ccallable(rt, nothing, def)
+ else
+ expand_ccallable(nothing, rt, def)
+ end
end
# @ccall implementation
@@ -268,7 +274,31 @@ The above input outputs this:
(:printf, :Cvoid, [:Cstring, :Cuint], ["%d", :value])
"""
-function ccall_macro_parse(expr::Expr)
+function ccall_macro_parse(exprs)
+ gc_safe = false
+ expr = nothing
+ if exprs isa Expr
+ expr = exprs
+ elseif length(exprs) == 1
+ expr = exprs[1]
+ elseif length(exprs) == 2
+ gc_expr = exprs[1]
+ expr = exprs[2]
+ if gc_expr.head == :(=) && gc_expr.args[1] == :gc_safe
+ if gc_expr.args[2] == true
+ gc_safe = true
+ elseif gc_expr.args[2] == false
+ gc_safe = false
+ else
+ throw(ArgumentError("gc_safe must be true or false"))
+ end
+ else
+ throw(ArgumentError("@ccall option must be `gc_safe=true` or `gc_safe=false`"))
+ end
+ else
+ throw(ArgumentError("@ccall needs a function signature with a return type"))
+ end
+
# setup and check for errors
if !isexpr(expr, :(::))
throw(ArgumentError("@ccall needs a function signature with a return type"))
@@ -328,12 +358,11 @@ function ccall_macro_parse(expr::Expr)
pusharg!(a)
end
end
-
- return func, rettype, types, args, nreq
+ return func, rettype, types, args, gc_safe, nreq
end
-function ccall_macro_lower(convention, func, rettype, types, args, nreq)
+function ccall_macro_lower(convention, func, rettype, types, args, gc_safe, nreq)
statements = []
# if interpolation was used, ensure the value is a function pointer at runtime.
@@ -351,9 +380,15 @@ function ccall_macro_lower(convention, func, rettype, types, args, nreq)
else
func = esc(func)
end
+ cconv = nothing
+ if convention isa Tuple
+ cconv = Expr(:cconv, (convention..., gc_safe), nreq)
+ else
+ cconv = Expr(:cconv, (convention, UInt16(0), gc_safe), nreq)
+ end
return Expr(:block, statements...,
- Expr(:call, :ccall, func, Expr(:cconv, convention, nreq), esc(rettype),
+ Expr(:call, :ccall, func, cconv, esc(rettype),
Expr(:tuple, map(esc, types)...), map(esc, args)...))
end
@@ -404,9 +439,16 @@ Example using an external library:
The string literal could also be used directly before the function
name, if desired `"libglib-2.0".g_uri_escape_string(...`
+
+It's possible to declare the ccall as `gc_safe` by using the `gc_safe = true` option:
+ @ccall gc_safe=true strlen(s::Cstring)::Csize_t
+This allows the garbage collector to run concurrently with the ccall, which can be useful whenever
+the `ccall` may block outside of julia.
+WARNING: This option should be used with caution, as it can lead to undefined behavior if the ccall
+calls back into the julia runtime. (`@cfunction`/`@ccallable` are safe however)
"""
-macro ccall(expr)
- return ccall_macro_lower(:ccall, ccall_macro_parse(expr)...)
+macro ccall(exprs...)
+ return ccall_macro_lower((:ccall), ccall_macro_parse(exprs)...)
end
macro ccall_effects(effects::UInt16, expr)
diff --git a/base/channels.jl b/base/channels.jl
index ef508bd40e3ed..0bb73e9acba87 100644
--- a/base/channels.jl
+++ b/base/channels.jl
@@ -61,7 +61,7 @@ Channel(sz=0) = Channel{Any}(sz)
"""
Channel{T=Any}(func::Function, size=0; taskref=nothing, spawn=false, threadpool=nothing)
-Create a new task from `func`, bind it to a new channel of type
+Create a new task from `func`, [`bind`](@ref) it to a new channel of type
`T` and size `size`, and schedule the task, all in a single call.
The channel is automatically closed when the task terminates.
diff --git a/base/char.jl b/base/char.jl
index 2e8410f6903e2..c089262ebf779 100644
--- a/base/char.jl
+++ b/base/char.jl
@@ -1,5 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
+import Core: AbstractChar, Char
+
"""
The `AbstractChar` type is the supertype of all character implementations
in Julia. A character represents a Unicode code point, and can be converted
diff --git a/base/client.jl b/base/client.jl
index 2527d382c695d..70d564a54615a 100644
--- a/base/client.jl
+++ b/base/client.jl
@@ -265,11 +265,10 @@ function exec_options(opts)
distributed_mode = (opts.worker == 1) || (opts.nprocs > 0) || (opts.machine_file != C_NULL)
if distributed_mode
let Distributed = require(PkgId(UUID((0x8ba89e20_285c_5b6f, 0x9357_94700520ee1b)), "Distributed"))
- Core.eval(MainInclude, :(const Distributed = $Distributed))
+ MainInclude.Distributed = Distributed
Core.eval(Main, :(using Base.MainInclude.Distributed))
+ invokelatest(Distributed.process_opts, opts)
end
-
- invokelatest(Main.Distributed.process_opts, opts)
end
interactiveinput = (repl || is_interactive::Bool) && isa(stdin, TTY)
@@ -401,7 +400,7 @@ function load_InteractiveUtils(mod::Module=Main)
try
# TODO: we have to use require_stdlib here because it is a dependency of REPL, but we would sort of prefer not to
let InteractiveUtils = require_stdlib(PkgId(UUID(0xb77e0a4c_d291_57a0_90e8_8db25a27a240), "InteractiveUtils"))
- Core.eval(MainInclude, :(const InteractiveUtils = $InteractiveUtils))
+ MainInclude.InteractiveUtils = InteractiveUtils
end
catch ex
@warn "Failed to import InteractiveUtils into module $mod" exception=(ex, catch_backtrace())
@@ -440,11 +439,12 @@ function run_fallback_repl(interactive::Bool)
eval_user_input(stderr, ex, true)
end
else
- while !eof(input)
+ while true
if interactive
print("julia> ")
flush(stdout)
end
+ eof(input) && break
try
line = ""
ex = nothing
@@ -536,6 +536,10 @@ The thrown errors are collected in a stack of exceptions.
"""
global err = nothing
+# Used for memoizing require_stdlib of these modules
+global InteractiveUtils::Module
+global Distributed::Module
+
# weakly exposes ans and err variables to Main
export ans, err
end
@@ -606,7 +610,7 @@ The `@main` macro may be used standalone or as part of the function definition,
case, parentheses are required. In particular, the following are equivalent:
```
-function (@main)(args)
+function @main(args)
println("Hello World")
end
```
@@ -625,7 +629,7 @@ imported into `Main`, it will be treated as an entrypoint in `Main`:
```
module MyApp
export main
- (@main)(args) = println("Hello World")
+ @main(args) = println("Hello World")
end
using .MyApp
# `julia` Will execute MyApp.main at the conclusion of script execution
@@ -635,7 +639,7 @@ Note that in particular, the semantics do not attach to the method
or the name:
```
module MyApp
- (@main)(args) = println("Hello World")
+ @main(args) = println("Hello World")
end
const main = MyApp.main
# `julia` Will *NOT* execute MyApp.main unless there is a separate `@main` annotation in `Main`
@@ -645,9 +649,6 @@ const main = MyApp.main
This macro is new in Julia 1.11. At present, the precise semantics of `@main` are still subject to change.
"""
macro main(args...)
- if !isempty(args)
- error("`@main` is expected to be used as `(@main)` without macro arguments.")
- end
if isdefined(__module__, :main)
if Base.binding_module(__module__, :main) !== __module__
error("Symbol `main` is already a resolved import in module $(__module__). `@main` must be used in the defining module.")
@@ -658,5 +659,9 @@ macro main(args...)
global main
global var"#__main_is_entrypoint__#"::Bool = true
end)
- esc(:main)
+ if !isempty(args)
+ Expr(:call, esc(:main), map(esc, args)...)
+ else
+ esc(:main)
+ end
end
diff --git a/base/cmd.jl b/base/cmd.jl
index b46c8293cdf3c..12d576e0e4c4b 100644
--- a/base/cmd.jl
+++ b/base/cmd.jl
@@ -504,7 +504,7 @@ julia> run(cm)
Process(`echo 1`, ProcessExited(0))
```
"""
-macro cmd(str)
+macro cmd(str::String)
cmd_ex = shell_parse(str, special=shell_special, filename=String(__source__.file))[1]
return :(cmd_gen($(esc(cmd_ex))))
end
diff --git a/base/coreir.jl b/base/coreir.jl
index 5199dfd35f028..59422afb44add 100644
--- a/base/coreir.jl
+++ b/base/coreir.jl
@@ -49,5 +49,5 @@ while processing a call, then `Conditional` everywhere else.
"""
Core.InterConditional
-InterConditional(var::SlotNumber, @nospecialize(thentype), @nospecialize(elsetype)) =
+Core.InterConditional(var::SlotNumber, @nospecialize(thentype), @nospecialize(elsetype)) =
InterConditional(slot_id(var), thentype, elsetype)
diff --git a/base/deprecated.jl b/base/deprecated.jl
index cffff05d954d1..eeb7c0e60638e 100644
--- a/base/deprecated.jl
+++ b/base/deprecated.jl
@@ -353,6 +353,7 @@ end
@deprecate one(i::CartesianIndex) oneunit(i)
@deprecate one(I::Type{CartesianIndex{N}}) where {N} oneunit(I)
+import .MPFR: BigFloat
@deprecate BigFloat(x, prec::Int) BigFloat(x; precision=prec)
@deprecate BigFloat(x, prec::Int, rounding::RoundingMode) BigFloat(x, rounding; precision=prec)
@deprecate BigFloat(x::Real, prec::Int) BigFloat(x; precision=prec)
@@ -531,4 +532,29 @@ end
# BEGIN 1.12 deprecations
+@deprecate isbindingresolved(m::Module, var::Symbol) true false
+
+"""
+ isbindingresolved(m::Module, s::Symbol) -> Bool
+
+Return whether the binding of a symbol in a module is resolved.
+
+See also: [`isexported`](@ref), [`ispublic`](@ref), [`isdeprecated`](@ref)
+
+```jldoctest
+julia> module Mod
+ foo() = 17
+ end
+Mod
+
+julia> Base.isbindingresolved(Mod, :foo)
+true
+```
+
+!!! warning
+ This function is deprecated. The concept of binding "resolvedness" was removed in Julia 1.12.
+ The function now always returns `true`.
+"""
+isbindingresolved
+
# END 1.12 deprecations
diff --git a/base/docs/Docs.jl b/base/docs/Docs.jl
index 061a94bffd9cf..ae3891e218824 100644
--- a/base/docs/Docs.jl
+++ b/base/docs/Docs.jl
@@ -75,8 +75,8 @@ const META = gensym(:meta)
const METAType = IdDict{Any,Any}
function meta(m::Module; autoinit::Bool=true)
- if !isdefinedglobal(m, META)
- return autoinit ? invokelatest(initmeta, m) : nothing
+ if !invokelatest(isdefinedglobal, m, META)
+ return autoinit ? initmeta(m) : nothing
end
# TODO: This `invokelatest` is not technically required, but because
# of the automatic constant backdating is currently required to avoid
@@ -85,13 +85,13 @@ function meta(m::Module; autoinit::Bool=true)
end
function initmeta(m::Module)
- if !isdefinedglobal(m, META)
+ if !invokelatest(isdefinedglobal, m, META)
val = METAType()
Core.eval(m, :(const $META = $val))
push!(modules, m)
return val
end
- return getglobal(m, META)
+ return invokelatest(getglobal, m, META)
end
function signature!(tv::Vector{Any}, expr::Expr)
diff --git a/base/error.jl b/base/error.jl
index 276555033443a..3ea7210652dad 100644
--- a/base/error.jl
+++ b/base/error.jl
@@ -240,13 +240,14 @@ macro assert(ex, msgs...)
msg = Main.Base.string(msg)
else
# string() might not be defined during bootstrap
- msg = :(Main.Base.inferencebarrier(_assert_tostring)($(Expr(:quote,msg))))
+ msg = :(_assert_tostring($(Expr(:quote,msg))))
end
return :($(esc(ex)) ? $(nothing) : throw(AssertionError($msg)))
end
# this may be overridden in contexts where `string(::Expr)` doesn't work
-_assert_tostring(msg) = isdefined(Main, :Base) ? Main.Base.string(msg) :
+_assert_tostring(@nospecialize(msg)) = Core.compilerbarrier(:type, __assert_tostring)(msg)
+__assert_tostring(msg) = isdefined(Main, :Base) ? Main.Base.string(msg) :
(Core.println(msg); "Error during bootstrap. See stdout.")
struct ExponentialBackOff
diff --git a/base/errorshow.jl b/base/errorshow.jl
index d4b9b3666fbb7..de315914c8b5c 100644
--- a/base/errorshow.jl
+++ b/base/errorshow.jl
@@ -588,8 +588,6 @@ function show_method_candidates(io::IO, ex::MethodError, kwargs=[])
end
if ex.world < reinterpret(UInt, method.primary_world)
print(iob, " (method too new to be called from this world context.)")
- elseif ex.world > reinterpret(UInt, method.deleted_world)
- print(iob, " (method deleted before this world age.)")
end
println(iob)
@@ -918,7 +916,7 @@ function _collapse_repeated_frames(trace)
m, last_m = StackTraces.frame_method_or_module(frame),
StackTraces.frame_method_or_module(last_frame)
if m isa Method && last_m isa Method
- params, last_params = Base.unwrap_unionall(m.sig).parameters, Base.unwrap_unionall(last_m.sig).parameters
+ params, last_params = Base.unwrap_unionall(m.sig).parameters::SimpleVector, Base.unwrap_unionall(last_m.sig).parameters::SimpleVector
if last_m.nkw != 0
pos_sig_params = last_params[(last_m.nkw+2):end]
issame = true
@@ -1141,6 +1139,98 @@ end
Experimental.register_error_hint(fielderror_listfields_hint_handler, FieldError)
+function UndefVarError_hint(io::IO, ex::UndefVarError)
+ var = ex.var
+ if isdefined(ex, :scope)
+ scope = ex.scope
+ if scope isa Module
+ bpart = Base.lookup_binding_partition(ex.world, GlobalRef(scope, var))
+ kind = Base.binding_kind(bpart)
+ if kind === Base.PARTITION_KIND_GLOBAL || kind === Base.PARTITION_KIND_UNDEF_CONST || kind == Base.PARTITION_KIND_DECLARED
+ print(io, "\nSuggestion: add an appropriate import or assignment. This global was declared but not assigned.")
+ elseif kind === Base.PARTITION_KIND_FAILED
+ print(io, "\nHint: It looks like two or more modules export different ",
+ "bindings with this name, resulting in ambiguity. Try explicitly ",
+ "importing it from a particular module, or qualifying the name ",
+ "with the module it should come from.")
+ elseif kind === Base.PARTITION_KIND_GUARD
+ print(io, "\nSuggestion: check for spelling errors or missing imports.")
+ elseif Base.is_some_explicit_imported(kind)
+ print(io, "\nSuggestion: this global was defined as `$(Base.partition_restriction(bpart).globalref)` but not assigned a value.")
+ elseif kind === Base.PARTITION_KIND_BACKDATED_CONST
+ print(io, "\nSuggestion: define the const at top-level before running function that uses it (stricter Julia v1.12+ rule).")
+ end
+ elseif scope === :static_parameter
+ print(io, "\nSuggestion: run Test.detect_unbound_args to detect method arguments that do not fully constrain a type parameter.")
+ elseif scope === :local
+ print(io, "\nSuggestion: check for an assignment to a local variable that shadows a global of the same name.")
+ end
+ else
+ scope = undef
+ end
+ if scope !== Base
+ warned = _UndefVarError_warnfor(io, [Base], var)
+
+ if !warned
+ modules_to_check = (m for m in Base.loaded_modules_order
+ if m !== Core && m !== Base && m !== Main && m !== scope)
+ warned |= _UndefVarError_warnfor(io, modules_to_check, var)
+ end
+
+ warned || _UndefVarError_warnfor(io, [Core, Main], var)
+ end
+ return nothing
+end
+
+function _UndefVarError_warnfor(io::IO, modules, var::Symbol)
+ active_mod = Base.active_module()
+
+ warned = false
+ # collect modules which export or make public the variable by
+ # the module in which the variable is defined
+ to_warn_about = Dict{Module, Vector{Module}}()
+ for m in modules
+ # only include in info if binding has a value and is exported or public
+ if !Base.isdefined(m, var) || (!Base.isexported(m, var) && !Base.ispublic(m, var))
+ continue
+ end
+ warned = true
+
+ # handle case where the undefined variable is the name of a loaded module
+ if Symbol(m) == var && !isdefined(active_mod, var)
+ print(io, "\nHint: $m is loaded but not imported in the active module $active_mod.")
+ continue
+ end
+
+ binding_m = Base.binding_module(m, var)
+ if !haskey(to_warn_about, binding_m)
+ to_warn_about[binding_m] = [m]
+ else
+ push!(to_warn_about[binding_m], m)
+ end
+ end
+
+ for (binding_m, modules) in pairs(to_warn_about)
+ print(io, "\nHint: a global variable of this name also exists in ", binding_m, ".")
+ for m in modules
+ m == binding_m && continue
+ how_available = if Base.isexported(m, var)
+ "exported by"
+ elseif Base.ispublic(m, var)
+ "declared public in"
+ end
+ print(io, "\n - Also $how_available $m")
+ if !isdefined(active_mod, nameof(m)) || (getproperty(active_mod, nameof(m)) !== m)
+ print(io, " (loaded but not imported in $active_mod)")
+ end
+ print(io, ".")
+ end
+ end
+ return warned
+end
+
+Base.Experimental.register_error_hint(UndefVarError_hint, UndefVarError)
+
# ExceptionStack implementation
size(s::ExceptionStack) = size(s.stack)
getindex(s::ExceptionStack, i::Int) = s.stack[i]
diff --git a/base/essentials.jl b/base/essentials.jl
index 5db7a5f6fb0d9..5068acf24cbc1 100644
--- a/base/essentials.jl
+++ b/base/essentials.jl
@@ -323,7 +323,7 @@ macro _nothrow_meta()
#=:consistent_overlay=#false,
#=:nortcall=#false))
end
-# can be used in place of `@assume_effects :nothrow` (supposed to be used for bootstrapping)
+# can be used in place of `@assume_effects :noub` (supposed to be used for bootstrapping)
macro _noub_meta()
return _is_internal(__module__) && Expr(:meta, Expr(:purity,
#=:consistent=#false,
@@ -690,6 +690,8 @@ cconvert(::Type{<:Ptr}, x) = x # but defer the conversion to Ptr to unsafe_conve
unsafe_convert(::Type{T}, x::T) where {T} = x # unsafe_convert (like convert) defaults to assuming the convert occurred
unsafe_convert(::Type{T}, x::T) where {T<:Ptr} = x # to resolve ambiguity with the next method
unsafe_convert(::Type{P}, x::Ptr) where {P<:Ptr} = convert(P, x)
+unsafe_convert(::Type{Ptr{UInt8}}, s::String) = ccall(:jl_string_ptr, Ptr{UInt8}, (Any,), s)
+unsafe_convert(::Type{Ptr{Int8}}, s::String) = ccall(:jl_string_ptr, Ptr{Int8}, (Any,), s)
"""
reinterpret(::Type{Out}, x::In)
@@ -1037,63 +1039,6 @@ end
Val(x) = Val{x}()
-"""
- invokelatest(f, args...; kwargs...)
-
-Calls `f(args...; kwargs...)`, but guarantees that the most recent method of `f`
-will be executed. This is useful in specialized circumstances,
-e.g. long-running event loops or callback functions that may
-call obsolete versions of a function `f`.
-(The drawback is that `invokelatest` is somewhat slower than calling
-`f` directly, and the type of the result cannot be inferred by the compiler.)
-
-!!! compat "Julia 1.9"
- Prior to Julia 1.9, this function was not exported, and was called as `Base.invokelatest`.
-"""
-function invokelatest(@nospecialize(f), @nospecialize args...; kwargs...)
- @inline
- kwargs = merge(NamedTuple(), kwargs)
- if isempty(kwargs)
- return Core._call_latest(f, args...)
- end
- return Core._call_latest(Core.kwcall, kwargs, f, args...)
-end
-
-"""
- invoke_in_world(world, f, args...; kwargs...)
-
-Call `f(args...; kwargs...)` in a fixed world age, `world`.
-
-This is useful for infrastructure running in the user's Julia session which is
-not part of the user's program. For example, things related to the REPL, editor
-support libraries, etc. In these cases it can be useful to prevent unwanted
-method invalidation and recompilation latency, and to prevent the user from
-breaking supporting infrastructure by mistake.
-
-The current world age can be queried using [`Base.get_world_counter()`](@ref)
-and stored for later use within the lifetime of the current Julia session, or
-when serializing and reloading the system image.
-
-Technically, `invoke_in_world` will prevent any function called by `f` from
-being extended by the user during their Julia session. That is, generic
-function method tables seen by `f` (and any functions it calls) will be frozen
-as they existed at the given `world` age. In a sense, this is like the opposite
-of [`invokelatest`](@ref).
-
-!!! note
- It is not valid to store world ages obtained in precompilation for later use.
- This is because precompilation generates a "parallel universe" where the
- world age refers to system state unrelated to the main Julia session.
-"""
-function invoke_in_world(world::UInt, @nospecialize(f), @nospecialize args...; kwargs...)
- @inline
- kwargs = Base.merge(NamedTuple(), kwargs)
- if isempty(kwargs)
- return Core._call_in_world(world, f, args...)
- end
- return Core._call_in_world(world, Core.kwcall, kwargs, f, args...)
-end
-
"""
inferencebarrier(x)
diff --git a/base/experimental.jl b/base/experimental.jl
index e35e920298c3d..efaf4bd33a820 100644
--- a/base/experimental.jl
+++ b/base/experimental.jl
@@ -163,7 +163,7 @@ macro max_methods(n::Int, fdef::Expr)
end
"""
- Experimental.@compiler_options optimize={0,1,2,3} compile={yes,no,all,min} infer={yes,no} max_methods={default,1,2,3,4}
+ Experimental.@compiler_options optimize={0,1,2,3} compile={yes,no,all,min} infer={true,false} max_methods={default,1,2,3,4}
Set compiler options for code in the enclosing module. Options correspond directly to
command-line options with the same name, where applicable. The following options
@@ -658,7 +658,7 @@ function wait_with_timeout(c::GenericCondition; first::Bool=false, timeout::Real
# Confirm that the waiting task is still in the wait queue and remove it. If
# the task is not in the wait queue, it must have been notified already so we
# don't do anything here.
- if !waiter_left[] && ct.queue == c.waitq
+ if !waiter_left[] && ct.queue === c.waitq
dosched = true
Base.list_deletefirst!(c.waitq, ct)
end
diff --git a/base/exports.jl b/base/exports.jl
index d81067478dd55..2e0bb3ccfe4cf 100644
--- a/base/exports.jl
+++ b/base/exports.jl
@@ -1,5 +1,44 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
+# Re-exports from `Core`
+export Core,
+ # key types
+ Any, DataType, Vararg, NTuple,
+ Tuple, Type, UnionAll, TypeVar, Union, Nothing, Cvoid,
+ AbstractArray, DenseArray, NamedTuple, Pair,
+ # special objects
+ Function, Method, Module, Symbol, Task, UndefInitializer, undef, WeakRef, VecElement,
+ Array, Memory, MemoryRef, AtomicMemory, AtomicMemoryRef, GenericMemory, GenericMemoryRef,
+ # numeric types
+ Number, Real, Integer, Bool, Ref, Ptr,
+ AbstractFloat, Float16, Float32, Float64,
+ Signed, Int, Int8, Int16, Int32, Int64, Int128,
+ Unsigned, UInt, UInt8, UInt16, UInt32, UInt64, UInt128,
+ # string types
+ AbstractChar, Char, AbstractString, String, IO,
+ # errors
+ ErrorException, BoundsError, DivideError, DomainError, Exception,
+ InterruptException, InexactError, OutOfMemoryError, ReadOnlyMemoryError,
+ OverflowError, StackOverflowError, SegmentationFault, UndefRefError, UndefVarError,
+ TypeError, ArgumentError, MethodError, AssertionError, LoadError, InitError,
+ UndefKeywordError, ConcurrencyViolationError, FieldError,
+ # AST representation
+ Expr, QuoteNode, LineNumberNode, GlobalRef,
+ # object model functions
+ fieldtype, getfield, setfield!, swapfield!, modifyfield!, replacefield!, setfieldonce!,
+ nfields, throw, tuple, ===, isdefined,
+ # access to globals
+ getglobal, setglobal!, swapglobal!, modifyglobal!, replaceglobal!, setglobalonce!, isdefinedglobal,
+ # ifelse, sizeof # not exported, to avoid conflicting with Base
+ # type reflection
+ <:, typeof, isa, typeassert,
+ # method reflection
+ applicable, invoke,
+ # constants
+ nothing, Main,
+ # backwards compatibility
+ arrayref, arrayset, arraysize, const_arrayref
+
export
# Modules
Meta,
diff --git a/base/expr.jl b/base/expr.jl
index d71723ee26f1f..ab42d5bb1933b 100644
--- a/base/expr.jl
+++ b/base/expr.jl
@@ -532,16 +532,20 @@ The `:consistent` setting asserts that for egal (`===`) inputs:
contents) are not egal.
!!! note
- The `:consistent`-cy assertion is made world-age wise. More formally, write
- ``fᵢ`` for the evaluation of ``f`` in world-age ``i``, then this setting requires:
+ The `:consistent`-cy assertion is made with respect to a particular world range `R`.
+ More formally, write ``fᵢ`` for the evaluation of ``f`` in world-age ``i``, then this setting requires:
```math
- ∀ i, x, y: x ≡ y → fᵢ(x) ≡ fᵢ(y)
+ ∀ i ∈ R, j ∈ R, x, y: x ≡ y → fᵢ(x) ≡ fⱼ(y)
```
- However, for two world ages ``i``, ``j`` s.t. ``i ≠ j``, we may have ``fᵢ(x) ≢ fⱼ(y)``.
+
+ For `@assume_effects`, the range `R` is `m.primary_world:m.deleted_world` of
+ the annotated or containing method.
+
+ For ordinary code instances, `R` is `ci.min_world:ci.max_world`.
A further implication is that `:consistent` functions may not make their
return value dependent on the state of the heap or any other global state
- that is not constant for a given world age.
+ that is not constant over the given world age range.
!!! note
The `:consistent`-cy includes all legal rewrites performed by the optimizer.
@@ -1344,6 +1348,10 @@ function make_atomic(order, ex)
op = :+
elseif ex.head === :(-=)
op = :-
+ elseif ex.head === :(|=)
+ op = :|
+ elseif ex.head === :(&=)
+ op = :&
elseif @isdefined string
shead = string(ex.head)
if endswith(shead, '=')
@@ -1660,7 +1668,8 @@ function generated_body_to_codeinfo(ex::Expr, defmod::Module, isva::Bool)
ci = ccall(:jl_expand, Any, (Any, Any), ex, defmod)
if !isa(ci, CodeInfo)
if isa(ci, Expr) && ci.head === :error
- error("syntax: $(ci.args[1])")
+ msg = ci.args[1]
+ error(msg isa String ? strcat("syntax: ", msg) : msg)
end
error("The function body AST defined by this @generated function is not pure. This likely means it contains a closure, a comprehension or a generator.")
end
@@ -1694,6 +1703,6 @@ function (g::Core.GeneratedFunctionStub)(world::UInt, source::Method, @nospecial
Expr(:meta, :pop_loc))))
spnames = g.spnames
return generated_body_to_codeinfo(spnames === Core.svec() ? lam : Expr(Symbol("with-static-parameters"), lam, spnames...),
- typename(typeof(g.gen)).module,
+ source.module,
source.isva)
end
diff --git a/base/filesystem.jl b/base/filesystem.jl
index bc1f4942877e8..2934fc15e392f 100644
--- a/base/filesystem.jl
+++ b/base/filesystem.jl
@@ -137,11 +137,11 @@ export File,
S_IROTH, S_IWOTH, S_IXOTH, S_IRWXO
import .Base:
- IOError, _UVError, _sizeof_uv_fs, check_open, close, eof, eventloop, fd, isopen,
- bytesavailable, position, read, read!, readavailable, seek, seekend, show,
- skip, stat, unsafe_read, unsafe_write, write, transcode, uv_error,
+ IOError, _UVError, _sizeof_uv_fs, check_open, close, closewrite, eof, eventloop, fd, isopen,
+ bytesavailable, position, read, read!, readbytes!, readavailable, seek, seekend, show,
+ skip, stat, unsafe_read, unsafe_write, write, transcode, uv_error, _uv_error,
setup_stdio, rawhandle, OS_HANDLE, INVALID_OS_HANDLE, windowserror, filesize,
- isexecutable, isreadable, iswritable, MutableDenseArrayType
+ isexecutable, isreadable, iswritable, MutableDenseArrayType, truncate
import .Base.RefValue
diff --git a/base/float.jl b/base/float.jl
index faded5cd5978c..1878a6a953360 100644
--- a/base/float.jl
+++ b/base/float.jl
@@ -2,6 +2,9 @@
const IEEEFloat = Union{Float16, Float32, Float64}
+import Core: Float16, Float32, Float64, AbstractFloat
+import Core: Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128
+
## floating point traits ##
"""
@@ -527,7 +530,8 @@ function _to_float(number::U, ep) where {U<:Unsigned}
return reinterpret(F, bits)
end
-@assume_effects :terminates_locally :nothrow function rem_internal(x::T, y::T) where {T<:IEEEFloat}
+function rem_internal(x::T, y::T) where {T<:IEEEFloat}
+ @_terminates_locally_meta
xuint = reinterpret(Unsigned, x)
yuint = reinterpret(Unsigned, y)
if xuint <= yuint
@@ -622,13 +626,15 @@ end
isequal(x::T, y::T) where {T<:IEEEFloat} = fpiseq(x, y)
# interpret as sign-magnitude integer
-@inline function _fpint(x)
+function _fpint(x)
+ @inline
IntT = inttype(typeof(x))
ix = reinterpret(IntT, x)
return ifelse(ix < zero(IntT), ix ⊻ typemax(IntT), ix)
end
-@inline function isless(a::T, b::T) where T<:IEEEFloat
+function isless(a::T, b::T) where T<:IEEEFloat
+ @inline
(isnan(a) || isnan(b)) && return !isnan(a)
return _fpint(a) < _fpint(b)
@@ -717,84 +723,6 @@ See also: [`Inf`](@ref), [`iszero`](@ref), [`isfinite`](@ref), [`isnan`](@ref).
isinf(x::Real) = !isnan(x) & !isfinite(x)
isinf(x::IEEEFloat) = abs(x) === oftype(x, Inf)
-const hx_NaN = hash_uint64(reinterpret(UInt64, NaN))
-function hash(x::Float64, h::UInt)
- # see comments on trunc and hash(Real, UInt)
- if typemin(Int64) <= x < typemax(Int64)
- xi = fptosi(Int64, x)
- if isequal(xi, x)
- return hash(xi, h)
- end
- elseif typemin(UInt64) <= x < typemax(UInt64)
- xu = fptoui(UInt64, x)
- if isequal(xu, x)
- return hash(xu, h)
- end
- elseif isnan(x)
- return hx_NaN ⊻ h # NaN does not have a stable bit pattern
- end
- return hash_uint64(bitcast(UInt64, x)) - 3h
-end
-
-hash(x::Float32, h::UInt) = hash(Float64(x), h)
-
-function hash(x::Float16, h::UInt)
- # see comments on trunc and hash(Real, UInt)
- if isfinite(x) # all finite Float16 fit in Int64
- xi = fptosi(Int64, x)
- if isequal(xi, x)
- return hash(xi, h)
- end
- elseif isnan(x)
- return hx_NaN ⊻ h # NaN does not have a stable bit pattern
- end
- return hash_uint64(bitcast(UInt64, Float64(x))) - 3h
-end
-
-## generic hashing for rational values ##
-function hash(x::Real, h::UInt)
- # decompose x as num*2^pow/den
- num, pow, den = decompose(x)
-
- # handle special values
- num == 0 && den == 0 && return hash(NaN, h)
- num == 0 && return hash(ifelse(den > 0, 0.0, -0.0), h)
- den == 0 && return hash(ifelse(num > 0, Inf, -Inf), h)
-
- # normalize decomposition
- if den < 0
- num = -num
- den = -den
- end
- num_z = trailing_zeros(num)
- num >>= num_z
- den_z = trailing_zeros(den)
- den >>= den_z
- pow += num_z - den_z
- # If the real can be represented as an Int64, UInt64, or Float64, hash as those types.
- # To be an Integer the denominator must be 1 and the power must be non-negative.
- if den == 1
- # left = ceil(log2(num*2^pow))
- left = top_set_bit(abs(num)) + pow
- # 2^-1074 is the minimum Float64 so if the power is smaller, not a Float64
- if -1074 <= pow
- if 0 <= pow # if pow is non-negative, it is an integer
- left <= 63 && return hash(Int64(num) << Int(pow), h)
- left <= 64 && !signbit(num) && return hash(UInt64(num) << Int(pow), h)
- end # typemin(Int64) handled by Float64 case
- # 2^1024 is the maximum Float64 so if the power is greater, not a Float64
- # Float64s only have 53 mantisa bits (including implicit bit)
- left <= 1024 && left - pow <= 53 && return hash(ldexp(Float64(num), pow), h)
- end
- else
- h = hash_integer(den, h)
- end
- # handle generic rational values
- h = hash_integer(pow, h)
- h = hash_integer(num, h)
- return h
-end
-
#=
`decompose(x)`: non-canonical decomposition of rational values as `num*2^pow/den`.
diff --git a/base/gmp.jl b/base/gmp.jl
index 4d2b4b66ac41b..97488551f60f6 100644
--- a/base/gmp.jl
+++ b/base/gmp.jl
@@ -13,6 +13,8 @@ import .Base: *, +, -, /, <, <<, >>, >>>, <=, ==, >, >=, ^, (~), (&), (|), xor,
sign, hastypemax, isodd, iseven, digits!, hash, hash_integer, top_set_bit,
clamp, unsafe_takestring
+import Core: Signed, Float16, Float32, Float64
+
if Clong == Int32
const ClongMax = Union{Int8, Int16, Int32}
const CulongMax = Union{UInt8, UInt16, UInt32}
diff --git a/base/hamt.jl b/base/hamt.jl
index e3e4b4bd03ba9..c77c592b17e58 100644
--- a/base/hamt.jl
+++ b/base/hamt.jl
@@ -239,11 +239,11 @@ or grows the HAMT by inserting a new trie instead.
end
end
-length(::Leaf) = 1
-length(trie::HAMT) = sum((length(trie.data[i]) for i in eachindex(trie.data)), init=0)
+Base.length(::Leaf) = 1
+Base.length(trie::HAMT) = sum((length(trie.data[i]) for i in eachindex(trie.data)), init=0)
-isempty(::Leaf) = false
-function isempty(trie::HAMT)
+Base.isempty(::Leaf) = false
+function Base.isempty(trie::HAMT)
if islevel_empty(trie)
return true
end
@@ -251,7 +251,7 @@ function isempty(trie::HAMT)
end
# DFS
-function iterate(trie::HAMT, state=nothing)
+function Base.iterate(trie::HAMT, state=nothing)
if state === nothing
state = (;parent=nothing, trie, i=1)
end
diff --git a/base/hashing.jl b/base/hashing.jl
index d4a6217de6edb..868f5e1ad9abf 100644
--- a/base/hashing.jl
+++ b/base/hashing.jl
@@ -98,6 +98,87 @@ function hash_integer(n::Integer, h::UInt)
return h
end
+## efficient value-based hashing of floats ##
+
+const hx_NaN = hash_uint64(reinterpret(UInt64, NaN))
+function hash(x::Float64, h::UInt)
+ # see comments on trunc and hash(Real, UInt)
+ if typemin(Int64) <= x < typemax(Int64)
+ xi = fptosi(Int64, x)
+ if isequal(xi, x)
+ return hash(xi, h)
+ end
+ elseif typemin(UInt64) <= x < typemax(UInt64)
+ xu = fptoui(UInt64, x)
+ if isequal(xu, x)
+ return hash(xu, h)
+ end
+ elseif isnan(x)
+ return hx_NaN ⊻ h # NaN does not have a stable bit pattern
+ end
+ return hash_uint64(bitcast(UInt64, x)) - 3h
+end
+
+hash(x::Float32, h::UInt) = hash(Float64(x), h)
+
+function hash(x::Float16, h::UInt)
+ # see comments on trunc and hash(Real, UInt)
+ if isfinite(x) # all finite Float16 fit in Int64
+ xi = fptosi(Int64, x)
+ if isequal(xi, x)
+ return hash(xi, h)
+ end
+ elseif isnan(x)
+ return hx_NaN ⊻ h # NaN does not have a stable bit pattern
+ end
+ return hash_uint64(bitcast(UInt64, Float64(x))) - 3h
+end
+
+## generic hashing for rational values ##
+function hash(x::Real, h::UInt)
+ # decompose x as num*2^pow/den
+ num, pow, den = decompose(x)
+
+ # handle special values
+ num == 0 && den == 0 && return hash(NaN, h)
+ num == 0 && return hash(ifelse(den > 0, 0.0, -0.0), h)
+ den == 0 && return hash(ifelse(num > 0, Inf, -Inf), h)
+
+ # normalize decomposition
+ if den < 0
+ num = -num
+ den = -den
+ end
+ num_z = trailing_zeros(num)
+ num >>= num_z
+ den_z = trailing_zeros(den)
+ den >>= den_z
+ pow += num_z - den_z
+ # If the real can be represented as an Int64, UInt64, or Float64, hash as those types.
+ # To be an Integer the denominator must be 1 and the power must be non-negative.
+ if den == 1
+ # left = ceil(log2(num*2^pow))
+ left = top_set_bit(abs(num)) + pow
+ # 2^-1074 is the minimum Float64 so if the power is smaller, not a Float64
+ if -1074 <= pow
+ if 0 <= pow # if pow is non-negative, it is an integer
+ left <= 63 && return hash(Int64(num) << Int(pow), h)
+ left <= 64 && !signbit(num) && return hash(UInt64(num) << Int(pow), h)
+ end # typemin(Int64) handled by Float64 case
+ # 2^1024 is the maximum Float64 so if the power is greater, not a Float64
+ # Float64s only have 53 mantissa bits (including implicit bit)
+ left <= 1024 && left - pow <= 53 && return hash(ldexp(Float64(num), pow), h)
+ end
+ else
+ h = hash_integer(den, h)
+ end
+ # handle generic rational values
+ h = hash_integer(pow, h)
+ h = hash_integer(num, h)
+ return h
+end
+
+
## symbol & expression hashing ##
if UInt === UInt64
diff --git a/base/idset.jl b/base/idset.jl
index c46d49968ff73..95c9bf784f557 100644
--- a/base/idset.jl
+++ b/base/idset.jl
@@ -92,8 +92,17 @@ function sizehint!(s::IdSet, newsz)
nothing
end
+function _zero!(a::Memory{<:BitInteger})
+ t = @_gc_preserve_begin a
+ p = unsafe_convert(Ptr{Cvoid}, a)
+ T = eltype(a)
+ memset(p, 0x0, (sizeof(T) * length(a)) % UInt)
+ @_gc_preserve_end t
+ return a
+end
+
function empty!(s::IdSet)
- fill!(s.idxs, 0x00)
+ _zero!(s.idxs)
list = s.list
for i = 1:s.max
_unsetindex!(list, i)
diff --git a/base/initdefs.jl b/base/initdefs.jl
index f7693813239c6..90f213c468bba 100644
--- a/base/initdefs.jl
+++ b/base/initdefs.jl
@@ -284,22 +284,14 @@ function load_path_expand(env::AbstractString)::Union{String, Nothing}
env == "@temp" && return mktempdir()
env == "@stdlib" && return Sys.STDLIB
if startswith(env, "@script")
- if @isdefined(PROGRAM_FILE)
- dir = dirname(PROGRAM_FILE)
- else
- cmds = unsafe_load_commands(JLOptions().commands)
- if any(cmd::Pair{Char, String}->cmd_suppresses_program(first(cmd)), cmds)
- # Usage error. The user did not pass a script.
- return nothing
- end
- dir = dirname(ARGS[1])
- end
- if env == "@script" # complete match, not startswith, so search upwards
- return current_project(dir)
- else
- # starts with, so assume relative path is after
- return abspath(replace(env, "@script" => dir))
- end
+ program_file = JLOptions().program_file
+ program_file = program_file != C_NULL ? unsafe_string(program_file) : nothing
+ isnothing(program_file) && return nothing # User did not pass a script
+
+ # Expand trailing relative path
+ dir = dirname(program_file)
+ dir = env != "@script" ? (dir * env[length("@script")+1:end]) : dir
+ return current_project(dir)
end
env = replace(env, '#' => VERSION.major, count=1)
env = replace(env, '#' => VERSION.minor, count=1)
diff --git a/base/int.jl b/base/int.jl
index 8a80f90f7e2c1..24b7abc646281 100644
--- a/base/int.jl
+++ b/base/int.jl
@@ -286,8 +286,14 @@ function mod(x::T, y::T) where T<:Integer
y == -1 && return T(0) # avoid potential overflow in fld
return x - fld(x, y) * y
end
-mod(x::BitSigned, y::Unsigned) = rem(y + unsigned(rem(x, y)), y)
-mod(x::Unsigned, y::Signed) = rem(y + signed(rem(x, y)), y)
+function mod(x::BitSigned, y::Unsigned)
+ remval = rem(x, y) # correct iff remval>=0
+ return unsigned(remval + (remval0 so correct iff y>0 or remval==0
+ return remval + (!iszero(remval) && y typemax(p) && throw(DomainError(x,"argument is beyond the range of type of the base"))
+ hastypemax(typeof(p)) && x > typemax(p) &&
+ throw(DomainError(x,"argument is beyond the range of type of the base"))
p >= x && return p
wp = a^n
wp > p || throw(OverflowError("result is beyond the range of type of the base"))
@@ -608,9 +612,10 @@ function prevpow(a::T, x::Real) where T <: Real
n = floor(Integer,log(a, x))
# round-off error of log can go either direction, so need some checks
p = a^n
- x > typemax(p) && throw(DomainError(x,"argument is beyond the range of type of the base"))
+ hastypemax(typeof(p)) && x > typemax(p) &&
+ throw(DomainError(x,"argument is beyond the range of type of the base"))
if a isa Integer
- wp, overflow = mul_with_overflow(a, p)
+ wp, overflow = mul_with_overflow(promote(a, p)...)
wp <= x && !overflow && return wp
else
wp = a^(n+1)
@@ -842,7 +847,7 @@ function append_c_digits(olength::Int, digits::Unsigned, buf, pos::Int)
while i >= 2
d, c = divrem(digits, 0x64)
digits = oftype(digits, d)
- @inbounds d100 = _dec_d100[(c % Int) + 1]
+ @inbounds d100 = _dec_d100[(c % Int)::Int + 1]
@inbounds buf[pos + i - 2] = d100 % UInt8
@inbounds buf[pos + i - 1] = (d100 >> 0x8) % UInt8
i -= 2
diff --git a/base/invalidation.jl b/base/invalidation.jl
index 36b867ede2868..e974bcd226de8 100644
--- a/base/invalidation.jl
+++ b/base/invalidation.jl
@@ -113,44 +113,84 @@ function invalidate_method_for_globalref!(gr::GlobalRef, method::Method, invalid
end
end
-function invalidate_code_for_globalref!(gr::GlobalRef, invalidated_bpart::Core.BindingPartition, new_max_world::UInt)
- b = convert(Core.Binding, gr)
- try
- valid_in_valuepos = false
- foreach_module_mtable(gr.mod, new_max_world) do mt::Core.MethodTable
- for method in MethodList(mt)
+export_affecting_partition_flags(bpart::Core.BindingPartition) =
+ ((bpart.kind & PARTITION_MASK_KIND) == PARTITION_KIND_GUARD,
+ (bpart.kind & PARTITION_FLAG_EXPORTED) != 0,
+ (bpart.kind & PARTITION_FLAG_DEPRECATED) != 0)
+
+function invalidate_code_for_globalref!(b::Core.Binding, invalidated_bpart::Core.BindingPartition, new_bpart::Core.BindingPartition, new_max_world::UInt)
+ gr = b.globalref
+
+ (_, (ib, ibpart)) = Compiler.walk_binding_partition(b, invalidated_bpart, new_max_world)
+ (_, (nb, nbpart)) = Compiler.walk_binding_partition(b, new_bpart, new_max_world+1)
+
+ # `abstract_eval_partition_load` is the maximum amount of information that inference
+ # reads from a binding partition. If this information does not change - we do not need to
+ # invalidate any code that inference created, because we know that the result will not change.
+ need_to_invalidate_code =
+ Compiler.abstract_eval_partition_load(nothing, ib, ibpart) !==
+ Compiler.abstract_eval_partition_load(nothing, nb, nbpart)
+
+ need_to_invalidate_export = export_affecting_partition_flags(invalidated_bpart) !==
+ export_affecting_partition_flags(new_bpart)
+
+ if need_to_invalidate_code
+ if (b.flags & BINDING_FLAG_ANY_IMPLICIT_EDGES) != 0
+ nmethods = ccall(:jl_module_scanned_methods_length, Csize_t, (Any,), gr.mod)
+ for i = 1:nmethods
+ method = ccall(:jl_module_scanned_methods_getindex, Any, (Any, Csize_t), gr.mod, i)::Method
invalidate_method_for_globalref!(gr, method, invalidated_bpart, new_max_world)
end
- return true
end
- b = convert(Core.Binding, gr)
if isdefined(b, :backedges)
for edge in b.backedges
if isa(edge, CodeInstance)
ccall(:jl_invalidate_code_instance, Cvoid, (Any, UInt), edge, new_max_world)
+ elseif isa(edge, Core.Binding)
+ isdefined(edge, :partitions) || continue
+ latest_bpart = edge.partitions
+ latest_bpart.max_world == typemax(UInt) || continue
+ is_some_imported(binding_kind(latest_bpart)) || continue
+ if is_some_binding_imported(binding_kind(latest_bpart))
+ partition_restriction(latest_bpart) === b || continue
+ end
+ invalidate_code_for_globalref!(edge, latest_bpart, latest_bpart, new_max_world)
else
invalidate_method_for_globalref!(gr, edge::Method, invalidated_bpart, new_max_world)
end
end
end
- catch err
- bt = catch_backtrace()
- invokelatest(Base.println, "Internal Error during invalidation:")
- invokelatest(Base.display_error, err, bt)
end
-end
-gr_needs_backedge_in_module(gr::GlobalRef, mod::Module) = gr.mod !== mod
+ if need_to_invalidate_code || need_to_invalidate_export
+ # This binding was exported - we need to check all modules that `using` us to see if they
+ # have a binding that is affected by this change.
+ usings_backedges = ccall(:jl_get_module_usings_backedges, Any, (Any,), gr.mod)
+ if usings_backedges !== nothing
+ for user::Module in usings_backedges::Vector{Any}
+ user_binding = ccall(:jl_get_module_binding_or_nothing, Any, (Any, Any), user, gr.name)
+ user_binding === nothing && continue
+ isdefined(user_binding, :partitions) || continue
+ latest_bpart = user_binding.partitions
+ latest_bpart.max_world == typemax(UInt) || continue
+ is_some_implicit(binding_kind(latest_bpart)) || continue
+ new_bpart = need_to_invalidate_export ?
+ ccall(:jl_maybe_reresolve_implicit, Any, (Any, Csize_t), user_binding, new_max_world) :
+ latest_bpart
+ if need_to_invalidate_code || new_bpart !== latest_bpart
+ invalidate_code_for_globalref!(convert(Core.Binding, user_binding), latest_bpart, new_bpart, new_max_world)
+ end
+ end
+ end
+ end
+end
+invalidate_code_for_globalref!(gr::GlobalRef, invalidated_bpart::Core.BindingPartition, new_bpart::Core.BindingPartition, new_max_world::UInt) =
+ invalidate_code_for_globalref!(convert(Core.Binding, gr), invalidated_bpart, new_bpart, new_max_world)
-# N.B.: This needs to match jl_maybe_add_binding_backedge
function maybe_add_binding_backedge!(b::Core.Binding, edge::Union{Method, CodeInstance})
- method = isa(edge, Method) ? edge : edge.def.def::Method
- gr_needs_backedge_in_module(b.globalref, method.module) || return
- if !isdefined(b, :backedges)
- b.backedges = Any[]
- end
- !isempty(b.backedges) && b.backedges[end] === edge && return
- push!(b.backedges, edge)
+ meth = isa(edge, Method) ? edge : Compiler.get_ci_mi(edge).def
+ ccall(:jl_maybe_add_binding_backedge, Cint, (Any, Any, Any), b, edge, meth)
+ return nothing
end
function binding_was_invalidated(b::Core.Binding)
@@ -159,26 +199,34 @@ function binding_was_invalidated(b::Core.Binding)
b.partitions.min_world > unsafe_load(cglobal(:jl_require_world, UInt))
end
-function scan_new_method!(methods_with_invalidated_source::IdSet{Method}, method::Method)
+function scan_new_method!(methods_with_invalidated_source::IdSet{Method}, method::Method, image_backedges_only::Bool)
isdefined(method, :source) || return
+ if image_backedges_only && !has_image_globalref(method)
+ return
+ end
src = _uncompressed_ir(method)
mod = method.module
foreachgr(src) do gr::GlobalRef
b = convert(Core.Binding, gr)
- binding_was_invalidated(b) && push!(methods_with_invalidated_source, method)
+ if binding_was_invalidated(b)
+ # TODO: We could turn this into an additional if condition. For now, use it as a reasonably cheap
+ # additional consistency check
+ @assert !image_backedges_only
+ push!(methods_with_invalidated_source, method)
+ end
maybe_add_binding_backedge!(b, method)
end
end
-function scan_new_methods(extext_methods::Vector{Any}, internal_methods::Vector{Any})
+function scan_new_methods(extext_methods::Vector{Any}, internal_methods::Vector{Any}, image_backedges_only::Bool)
methods_with_invalidated_source = IdSet{Method}()
for method in internal_methods
if isa(method, Method)
- scan_new_method!(methods_with_invalidated_source, method)
+ scan_new_method!(methods_with_invalidated_source, method, image_backedges_only)
end
end
for tme::Core.TypeMapEntry in extext_methods
- scan_new_method!(methods_with_invalidated_source, tme.func::Method)
+ scan_new_method!(methods_with_invalidated_source, tme.func::Method, image_backedges_only)
end
return methods_with_invalidated_source
end
diff --git a/base/iobuffer.jl b/base/iobuffer.jl
index 7e309b9ad586c..d121082f3585d 100644
--- a/base/iobuffer.jl
+++ b/base/iobuffer.jl
@@ -1,45 +1,168 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
-## work with AbstractVector{UInt8} via I/O primitives ##
+# IOBuffer is a Memory{UInt8} backed IO type for in-memory IO.
+
+# Here, u represents used bytes (already read), X represents bytes still to read,
+# - represents bytes uninitialized data but which can be written to later.
+# . represents bytes before offset, which the buffer will not touch, until
+# a write operation happens.
+
+# .....uuuuuuuuuuuuuXXXXXXXXXXXXX------------
+# | | | | | |
+# | offset ptr size | maxsize
+# 1 lastindex(data)
+
+# N.B: `mark` does not correspond to any index in the buffer. Instead, it stores
+# the mark at virtual offset in the buffer.
+
+# AFTER COMPACTION
+
+# XXXXXXXXXXXXX--------------------------
+# || | | | |
+# |1 ptr size | maxsize
+# | lastindex(data)
+# offset (set to zero)
+
+# * The underlying array is always 1-indexed
+# * The IOBuffer has full control (ownership) of the underlying array, only when
+# buffer.write == true.
+# * Unreachable data can be deleted in the buffer's data, shifting the whole thing to the left
+# to make room for more data, without replacing or resizing data.
+# This can be done only if the buffer is not seekable
-# Stateful string
mutable struct GenericIOBuffer{T<:AbstractVector{UInt8}} <: IO
- data::T # T should support: getindex, setindex!, length, copyto!, similar, and (optionally) resize!
- reinit::Bool # if true, data needs to be re-allocated (after take!)
+ # T should support: getindex, setindex!, length, copyto!, similar, size and (optionally) resize!
+ data::T
+
+ # The user can take control of `data` out of this struct. When that happens, instead of eagerly allocating
+ # a new array, we set `.reinit` to true, and then allocate a new one when needed.
+ # If reinit is true, the buffer is writable, and offset_or_compacted and size is zero. See `take!`
+ reinit::Bool
readable::Bool
writable::Bool
- seekable::Bool # if not seekable, implementation is free to destroy (compact) past read data
- append::Bool # add data at end instead of at pointer
- size::Int # end pointer (and write pointer if append == true) + offset
- maxsize::Int # fixed array size (typically pre-allocated)
- ptr::Int # read (and maybe write) pointer + offset
- offset::Int # offset of ptr and size from actual start of data and actual size
- mark::Int # reset mark location for ptr (or <0 for no mark)
- function GenericIOBuffer{T}(data::T, readable::Bool, writable::Bool, seekable::Bool, append::Bool,
- maxsize::Integer) where T<:AbstractVector{UInt8}
- require_one_based_indexing(data)
- return new(data, false, readable, writable, seekable, append, length(data), maxsize, 1, 0, -1)
- end
+ # If not seekable, implementation is free to destroy (compact) data before ptr, unless
+ # it can be recovered using the mark by using `reset`.
+ # If it IS seekable, the user may always recover any data in 1:size by seeking,
+ # so no data can be destroyed.
+ # Non-seekable IOBuffers can only be constructed with `PipeBuffer`, which are writable,
+ # readable and append.
+ seekable::Bool
+
+ # If true, write new data to the index size+1 instead of the index ptr.
+ append::Bool
+
+ # Last index of `data` that has been written to. Data in size+1:end has not yet been used,
+ # and may contain arbitrary values.
+ # This value is always in 0 : lastindex(data)
+ size::Int
+
+ # When the buffer is resized, or a new buffer allocated, this is the maximum size of the buffer.
+ # A new GenericIOBuffer may be constructed with an existing data larger than `maxsize`.
+ # When that happens, we must make sure to not have more than `maxsize` bytes in the buffer,
+ # else reallocating will lose data. So, never write to indices > `maxsize + get_offset(io)`
+ # This value is always in 0:typemax(Int).
+ maxsize::Int
+
+ # Data is read/written from/to ptr, except in situations where append is true, in which case
+ # data is still read from ptr, but written to size+1.
+ # This value is always in offset + 1 : size+1
+ ptr::Int
+
+ # This field has two distinct meanings:
+ # If the value is positive, it encodes an offset of the start of the data in `data`.
+ # This is used if the buffer is instantiated from a Vector with non-zero memory offset.
+ # Then, the IOBuffer stores the underlying memory, and so the first data in the buffer
+ # is not at index 1.
+ # If the value is negative, then `-io.offset_or_compacted` gets the number of compacted
+ # bytes. That's the number of unused bytes deleted from a non-seekable stream to make space.
+ # We need to keep track of it in order to make `mark` and `position` etc work, that is,
+ # we need to know the virtual position of the mark even when an arbitrary number
+ # of unused bytes has been deleted due to compaction.
+ # Since compaction will move data in the buffer and thereby zero the offset, either the
+ # offset or the number of compacted bytes will be zero at any point, so both can be
+ # stored in one field.
+ # If offset: Value is always in 0:lastindex(data)
+ # If compacted: Value is in typemin(Int):0
+ offset_or_compacted::Int
+
+ # The mark is -1 if not set, else the zero-indexed virtual position of ptr in the buffer.
+ # Due to compaction and offset, this value is not an index into the buffer, but may be translated
+ # to an index.
+ # This value is in -1:typemax(Int)
+ mark::Int
+
+ # Unsafe constructor which does not do any checking
+ global function _new_generic_iobuffer(
+ ::Type{T},
+ data::T,
+ readable::Bool,
+ writable::Bool,
+ seekable::Bool,
+ append::Bool,
+ maxsize::Int,
+ ) where T<:AbstractVector{UInt8}
+ len = Int(length(data))::Int
+ return new{T}(data, false, readable, writable, seekable, append, len, maxsize, 1, 0, -1)
+ end
+end
+
+function GenericIOBuffer{T}(
+ data::T,
+ readable::Bool,
+ writable::Bool,
+ seekable::Bool,
+ append::Bool,
+ maxsize::Integer,
+ truncate::Bool,
+ ) where T<:AbstractVector{UInt8}
+ require_one_based_indexing(data)
+ mz = Int(maxsize)::Int
+ len = Int(length(data))::Int
+ if !truncate && mz < len
+ throw(ArgumentError("maxsize must not be smaller than data length"))
+ end
+ buf = _new_generic_iobuffer(T, data, readable, writable, seekable, append, mz)
+ if truncate
+ buf.size = buf.offset_or_compacted
+ end
+ buf
end
const IOBuffer = GenericIOBuffer{Memory{UInt8}}
function GenericIOBuffer(data::T, readable::Bool, writable::Bool, seekable::Bool, append::Bool,
- maxsize::Integer) where T<:AbstractVector{UInt8}
- GenericIOBuffer{T}(data, readable, writable, seekable, append, maxsize)
+ maxsize::Integer, truncate::Bool) where T<:AbstractVector{UInt8}
+ GenericIOBuffer{T}(data, readable, writable, seekable, append, maxsize, truncate)
end
+
+# For this method, we use the underlying Memory of the vector. Therefore, we need to set the
+# ptr and size accordingly, so the buffer only uses the part of the memory that the vector does.
function GenericIOBuffer(data::Vector{UInt8}, readable::Bool, writable::Bool, seekable::Bool, append::Bool,
- maxsize::Integer)
+ maxsize::Integer, truncate::Bool)
ref = data.ref
- buf = GenericIOBuffer(ref.mem, readable, writable, seekable, append, maxsize)
+ mem = ref.mem
offset = memoryrefoffset(ref) - 1
- buf.ptr += offset
- buf.size = length(data) + offset
- buf.offset = offset
+ # The user may pass a vector of length <= maxsize, but where the underlying memory
+ # is larger than maxsize. Don't throw an error in that case.
+ mz = Int(maxsize)::Int
+ if !truncate && mz < length(data)
+ throw(ArgumentError("maxsize must not be smaller than data length"))
+ end
+ buf = _new_generic_iobuffer(Memory{UInt8}, mem, readable, writable, seekable, append, mz)
+ buf.offset_or_compacted = offset
+ buf.ptr = offset + 1
+ if truncate
+ buf.size = offset
+ else
+ buf.size = length(data) + offset
+ end
return buf
end
+get_offset(io::GenericIOBuffer) = max(0, io.offset_or_compacted)
+get_compacted(io::GenericIOBuffer) = max(0, -io.offset_or_compacted)
+
# allocate Vector{UInt8}s for IOBuffer storage that can efficiently become Strings
StringMemory(n::Integer) = unsafe_wrap(Memory{UInt8}, _string_n(n))
StringVector(n::Integer) = wrap(Array, StringMemory(n))
@@ -111,17 +234,11 @@ function IOBuffer(
truncate::Union{Bool,Nothing}=nothing,
maxsize::Integer=typemax(Int),
sizehint::Union{Integer,Nothing}=nothing)
- if maxsize < 0
- throw(ArgumentError("negative maxsize"))
- end
if sizehint !== nothing
sizehint!(data, sizehint)
end
flags = open_flags(read=read, write=write, append=append, truncate=truncate)
- buf = GenericIOBuffer(data, flags.read, flags.write, true, flags.append, Int(maxsize))
- if flags.truncate
- buf.size = buf.offset
- end
+ buf = GenericIOBuffer(data, flags.read, flags.write, true, flags.append, maxsize, flags.truncate)
return buf
end
@@ -131,17 +248,23 @@ function IOBuffer(;
append::Union{Bool,Nothing}=nothing,
truncate::Union{Bool,Nothing}=true,
maxsize::Integer=typemax(Int),
- sizehint::Union{Integer,Nothing}=nothing)
- size = sizehint !== nothing ? Int(sizehint) : maxsize != typemax(Int) ? Int(maxsize) : 32
+ sizehint::Union{Integer,Nothing}=nothing,
+ )
+ mz = Int(maxsize)::Int
+ if mz < 0
+ throw(ArgumentError("negative maxsize"))
+ end
+ size = if sizehint !== nothing
+ # Allow negative sizehint, just like `sizehint!` does
+ min(mz, max(0, Int(sizehint)::Int))
+ else
+ min(mz, 32)
+ end
flags = open_flags(read=read, write=write, append=append, truncate=truncate)
- buf = IOBuffer(
- StringMemory(size),
- read=flags.read,
- write=flags.write,
- append=flags.append,
- truncate=flags.truncate,
- maxsize=maxsize)
- fill!(buf.data, 0)
+ # A common usecase of IOBuffer is to incrementally construct strings. By using StringMemory
+ # as the default storage, we can turn the result into a string without copying.
+ buf = _new_generic_iobuffer(Memory{UInt8}, StringMemory(size), flags.read, flags.write, true, flags.append, mz)
+ buf.size = 0
return buf
end
@@ -158,21 +281,53 @@ If `data` is given, creates a `PipeBuffer` to operate on a data vector,
optionally specifying a size beyond which the underlying `Array` may not be grown.
"""
PipeBuffer(data::AbstractVector{UInt8}=Memory{UInt8}(); maxsize::Int = typemax(Int)) =
- GenericIOBuffer(data, true, true, false, true, maxsize)
+ GenericIOBuffer(data, true, true, false, true, maxsize, false)
PipeBuffer(maxsize::Integer) = (x = PipeBuffer(StringMemory(maxsize), maxsize = maxsize); x.size = 0; x)
+# Internal method where truncation IS supported
+function _truncated_pipebuffer(data::AbstractVector{UInt8}=Memory{UInt8}(); maxsize::Int = typemax(Int))
+ buf = PipeBuffer(data)
+ buf.size = get_offset(buf)
+ buf.maxsize = maxsize
+ buf
+end
+
_similar_data(b::GenericIOBuffer, len::Int) = similar(b.data, len)
_similar_data(b::IOBuffer, len::Int) = StringMemory(len)
-function copy(b::GenericIOBuffer)
- ret = typeof(b)(b.reinit ? _similar_data(b, 0) : b.writable ?
- copyto!(_similar_data(b, length(b.data)), b.data) : b.data,
- b.readable, b.writable, b.seekable, b.append, b.maxsize)
- ret.size = b.size
- ret.ptr = b.ptr
- ret.mark = b.mark
- ret.offset = b.offset
- return ret
+# Note: Copying may change the value of the position (and mark) for un-seekable streams.
+# However, these values are not stable anyway due to compaction.
+
+function copy(b::GenericIOBuffer{T}) where T
+ if b.reinit
+ # If buffer is used up, allocate a new size-zero buffer
+ # Reinit implies writable, and that ptr, size, offset and mark are already the default values
+ return typeof(b)(_similar_data(b, 0), b.readable, b.writable, b.seekable, b.append, b.maxsize, false)
+ elseif b.writable
+ # Else, we just copy the reachable bytes. If buffer is seekable, all bytes
+ # after offset are reachable, since they can be seeked to
+ used_span = get_used_span(b)
+ compacted = first(used_span) - get_offset(b) - 1
+ len = length(used_span)
+ data = copyto!(_similar_data(b, len), view(b.data, used_span))
+ ret = typeof(b)(data, b.readable, b.writable, b.seekable, b.append, b.maxsize, false)
+ ret.size = len
+ # Copying data over implicitly compacts, and may add compaction
+ ret.offset_or_compacted = -get_compacted(b) - compacted
+ ret.ptr = b.ptr - first(used_span) + 1
+ ret.mark = b.mark
+ return ret
+ else
+ # When the buffer is read-only, the copy and the original can share the same data,
+ # so we just make a shallow copy of the IOBuffer struct.
+ # Use internal constructor because we want to allow b.maxsize to be larger than data,
+ # in case that is the case for `b`.
+ ret = _new_generic_iobuffer(T, b.data, b.readable, b.writable, b.seekable, b.append, b.maxsize)
+ ret.offset_or_compacted = b.offset_or_compacted
+ ret.ptr = b.ptr
+ ret.mark = b.mark
+ return ret
+ end
end
show(io::IO, b::GenericIOBuffer) = print(io, "IOBuffer(data=UInt8[...], ",
@@ -180,9 +335,9 @@ show(io::IO, b::GenericIOBuffer) = print(io, "IOBuffer(data=UInt8[...], ",
"writable=", b.writable, ", ",
"seekable=", b.seekable, ", ",
"append=", b.append, ", ",
- "size=", b.size - b.offset, ", ",
+ "size=", b.size - get_offset(b), ", ",
"maxsize=", b.maxsize == typemax(Int) ? "Inf" : b.maxsize, ", ",
- "ptr=", b.ptr - b.offset, ", ",
+ "ptr=", b.ptr - get_offset(b), ", ",
"mark=", b.mark, ")")
@noinline function _throw_not_readable()
@@ -192,7 +347,7 @@ end
function unsafe_read(from::GenericIOBuffer, p::Ptr{UInt8}, nb::UInt)
from.readable || _throw_not_readable()
- avail = bytesavailable(from)
+ avail = bytesavailable(from) % UInt
adv = min(avail, nb)
unsafe_read!(p, from.data, from.ptr, adv)
from.ptr += adv
@@ -221,7 +376,45 @@ function unsafe_read!(dest::Ptr{UInt8}, src::DenseBytes, so::Integer, nbytes::UI
nothing
end
-function peek(from::GenericIOBuffer, T::Union{Type{Int16},Type{UInt16},Type{Int32},Type{UInt32},Type{Int64},Type{UInt64},Type{Int128},Type{UInt128},Type{Float16},Type{Float32},Type{Float64}})
+const MultiByteBitNumberType = Union{
+ Type{UInt16},
+ Type{Int16},
+ Type{UInt32},
+ Type{Int32},
+ Type{UInt64},
+ Type{Int64},
+ Type{UInt128},
+ Type{Int128},
+ Type{Float16},
+ Type{Float32},
+ Type{Float64},
+}
+
+function load_from_array(T::MultiByteBitNumberType, data::AbstractArray{UInt8}, from::Int)
+ x = if T <: AbstractFloat
+ uinttype(T)(0)
+ else
+ unsigned(T)(0)
+ end
+ for i in 0:sizeof(x)-1
+ x |= typeof(x)(data[from + i]) << (8 * i)
+ end
+ reinterpret(T, ltoh(x))
+end
+
+function peek(from::GenericIOBuffer, T::MultiByteBitNumberType)
+ from.readable || _throw_not_readable()
+ avail = bytesavailable(from)
+ nb = sizeof(T)
+ if nb > avail
+ throw(EOFError())
+ end
+ return load_from_array(T, from.data, from.ptr)
+end
+
+# This method can use a pointer, since the underlying buffer is dense
+# and memory backed
+function peek(from::GenericIOBuffer{<:MutableDenseArrayType}, T::MultiByteBitNumberType)
from.readable || _throw_not_readable()
avail = bytesavailable(from)
nb = sizeof(T)
@@ -235,29 +428,12 @@ function peek(from::GenericIOBuffer, T::Union{Type{Int16},Type{UInt16},Type{Int3
return x
end
-function read(from::GenericIOBuffer, T::Union{Type{Int16},Type{UInt16},Type{Int32},Type{UInt32},Type{Int64},Type{UInt64},Type{Int128},Type{UInt128},Type{Float16},Type{Float32},Type{Float64}})
+function read(from::GenericIOBuffer, T::MultiByteBitNumberType)
x = peek(from, T)
from.ptr += sizeof(T)
return x
end
-function read_sub(from::GenericIOBuffer, a::AbstractArray{T}, offs, nel) where T
- require_one_based_indexing(a)
- from.readable || _throw_not_readable()
- if offs+nel-1 > length(a) || offs < 1 || nel < 0
- throw(BoundsError())
- end
- if isa(a, MutableDenseArrayType{UInt8})
- nb = UInt(nel * sizeof(T))
- GC.@preserve a unsafe_read(from, pointer(a, offs), nb)
- else
- for i = offs:offs+nel-1
- a[i] = read(from, T)
- end
- end
- return a
-end
-
@inline function read(from::GenericIOBuffer, ::Type{UInt8})
from.readable || _throw_not_readable()
ptr = from.ptr
@@ -283,18 +459,33 @@ read(from::GenericIOBuffer, ::Type{Ptr{T}}) where {T} = convert(Ptr{T}, read(fro
isreadable(io::GenericIOBuffer) = io.readable
iswritable(io::GenericIOBuffer) = io.writable
-filesize(io::GenericIOBuffer) = (io.seekable ? io.size - io.offset : bytesavailable(io))
+# Number of bytes that can be read from the buffer, if you seek to the start first.
+filesize(io::GenericIOBuffer) = (io.seekable ? io.size - get_offset(io) : bytesavailable(io))
+
+# Number of bytes that can be read from the buffer.
bytesavailable(io::GenericIOBuffer) = io.size - io.ptr + 1
-position(io::GenericIOBuffer) = io.ptr - io.offset - 1
+
+# TODO: Document that position for an unmarked and unseekable stream is invalid (and make it error?)
+function position(io::GenericIOBuffer)
+ # Position is zero-indexed, but ptr is one-indexed, hence the -1
+ io.ptr - io.offset_or_compacted - 1
+end
function skip(io::GenericIOBuffer, n::Integer)
skip(io, clamp(n, Int))
end
+
function skip(io::GenericIOBuffer, n::Int)
+ # In both cases, the result will never go to before the first position,
+ # nor beyond the last position, and will not throw an error unless the stream
+ # is not seekable and we try to skip a negative number of bytes.
if signbit(n)
+ # Skipping a negative number of bytes is equivalent to seeking backwards.
seekto = clamp(widen(position(io)) + widen(n), Int)
seek(io, seekto) # Does error checking
else
+ # Don't use seek in order to allow a non-seekable IO to still skip bytes.
+ # Handle overflow.
n_max = io.size + 1 - io.ptr
io.ptr += min(n, n_max)
io
@@ -304,16 +495,30 @@ end
function seek(io::GenericIOBuffer, n::Integer)
seek(io, clamp(n, Int))
end
+
+function translate_seek_position(io::GenericIOBuffer, n::Int)
+ # If there is an offset (the field F is positive), then there are F unused bytes at the beginning
+ # of the data, and we need to seek to n + F + 1. (Also compensate for `seek` being zero-
+ # indexed)
+
+ # If bytes have been compacted (field F is negative), then F bytes have been deleted from
+ # the buffer, and a virtual position n means a position n + F in the data.
+ # Remember that F is negative, so n + F is subtracting from n. So we also end up with
+ # n + F + 1.
+ clamp(widen(n) + widen(io.offset_or_compacted) + widen(1), Int)
+end
+
function seek(io::GenericIOBuffer, n::Int)
if !io.seekable
ismarked(io) || throw(ArgumentError("seek failed, IOBuffer is not seekable and is not marked"))
n == io.mark || throw(ArgumentError("seek failed, IOBuffer is not seekable and n != mark"))
end
+
# TODO: REPL.jl relies on the fact that this does not throw (by seeking past the beginning or end
# of an GenericIOBuffer), so that would need to be fixed in order to throw an error here
- #(n < 0 || n > io.size - io.offset) && throw(ArgumentError("Attempted to seek outside IOBuffer boundaries."))
- #io.ptr = n + io.offset + 1
- io.ptr = clamp(n, 0, io.size - io.offset) + io.offset + 1
+ max_ptr = io.size + 1
+ min_ptr = get_offset(io) + 1
+ io.ptr = clamp(translate_seek_position(io, n), min_ptr, max_ptr)
return io
end
@@ -322,113 +527,163 @@ function seekend(io::GenericIOBuffer)
return io
end
-# choose a resize strategy based on whether `resize!` is defined:
-# for a Vector, we use `resize!`, but for most other types,
-# this calls `similar`+copy
-function _resize!(io::GenericIOBuffer, sz::Int)
- a = io.data
- offset = io.offset
- if applicable(resize!, a, sz)
- if offset != 0
- size = io.size
- size > offset && copyto!(a, 1, a, offset + 1, min(sz, size - offset))
- io.ptr -= offset
- io.size -= offset
- io.offset = 0
- end
- resize!(a, sz)
+# Resize the io's data to `new_size`, which must not be > io.maxsize.
+# Use `resize!` if the data supports it, else reallocate a new one and
+# copy the old data over.
+# If not `exact` and resizing is not supported, overallocate in order to
+# prevent excessive resizing.
+function _resize!(io::GenericIOBuffer, new_size::Int, exact::Bool)
+ old_data = io.data
+ if applicable(resize!, old_data, new_size)
+ resize!(old_data, new_size)
else
- size = io.size
- if size >= sz && sz != 0
- b = a
- else
- b = _similar_data(io, sz == 0 ? 0 : max(overallocation(size - io.offset), sz))
- end
- size > offset && copyto!(b, 1, a, offset + 1, min(sz, size - offset))
- io.data = b
- io.ptr -= offset
- io.size -= offset
- io.offset = 0
+ new_size = exact ? new_size : min(io.maxsize, overallocation(new_size))
+ used_span = get_used_span(io)
+ deleted = first(used_span) - 1
+ compacted = deleted - get_offset(io)
+ new_data = _similar_data(io, new_size)
+ io.data = new_data
+ iszero(new_size) && return io
+ len_used = length(used_span)
+ iszero(len_used) || copyto!(new_data, 1, old_data, first(used_span), len_used)
+ # Copying will implicitly compact, and so compaction must be updated
+ io.offset_or_compacted = -get_compacted(io) - compacted
+ io.ptr -= deleted
+ io.size = len_used
end
return io
end
function truncate(io::GenericIOBuffer, n::Integer)
io.writable || throw(ArgumentError("truncate failed, IOBuffer is not writeable"))
+ # Non-seekable buffers can only be constructed with `PipeBuffer`, which is explicitly
+ # documented to not be truncatable.
io.seekable || throw(ArgumentError("truncate failed, IOBuffer is not seekable"))
n < 0 && throw(ArgumentError("truncate failed, n bytes must be ≥ 0, got $n"))
n > io.maxsize && throw(ArgumentError("truncate failed, $(n) bytes is exceeds IOBuffer maxsize $(io.maxsize)"))
- n = Int(n)
+ n = Int(n)::Int
+ offset = get_offset(io)
+ current_size = io.size - offset
if io.reinit
- io.data = _similar_data(io, n)
+ # If reinit, we don't need to truncate anything but just reinitialize
+ # the buffer with zeros. Mark, ptr and offset have already been reset.
+ io.data = fill!(_similar_data(io, n), 0x00)
io.reinit = false
- elseif n > length(io.data) + io.offset
- _resize!(io, n)
- end
- ismarked(io) && io.mark > n && unmark(io)
- n += io.offset
- io.data[io.size+1:n] .= 0
- io.size = n
- io.ptr = min(io.ptr, n+1)
+ io.size = n
+ elseif n < current_size
+ # Else, if we need to shrink the iobuffer, we simply change the pointers without
+ # actually shrinking the underlying storage, or copying data.
+
+ # Clear the mark if it points to data that has now been deleted.
+ if translate_seek_position(io, io.mark) > n+offset
+ io.mark = -1
+ end
+ io.size = n + offset
+ io.ptr = min(io.ptr, n + offset + 1)
+ elseif n > current_size
+ if n + offset > io.maxsize
+ compact!(io)
+ end
+ _resize!(io, n + get_offset(io), false)
+ fill!(view(io.data, io.size + 1:min(length(io.data), n + get_offset(io))), 0x00)
+ io.size = min(length(io.data), n + get_offset(io))
+ end
return io
end
-function compact(io::GenericIOBuffer)
- io.writable || throw(ArgumentError("compact failed, IOBuffer is not writeable"))
- io.seekable && throw(ArgumentError("compact failed, IOBuffer is seekable"))
- io.reinit && return
- local ptr::Int, bytes_to_move::Int
- if ismarked(io) && io.mark < position(io)
- io.mark == 0 && return
- ptr = io.mark + io.offset
- bytes_to_move = bytesavailable(io) + (io.ptr - ptr)
- else
- ptr = io.ptr
- bytes_to_move = bytesavailable(io)
+# Ensure that the buffer has room for at least `nshort` more bytes, except when
+# doing that would exceed maxsize.
+@inline ensureroom(io::GenericIOBuffer, nshort::Int) = ensureroom(io, UInt(nshort))
+
+@inline function ensureroom(io::GenericIOBuffer, nshort::UInt)
+ # If the IO is not writable, we call the slow path only to error.
+ # If reinit, the data has been handed out to the user, and the IOBuffer
+ # no longer controls it, so we need to allocate a new one.
+ if !io.writable || io.reinit
+ return ensureroom_reallocate(io, nshort)
+ end
+ # The fast path here usually checks there is already room, then does nothing.
+ # When append is true, new data is added after io.size, not io.ptr
+ existing_space = min(lastindex(io.data), io.maxsize + get_offset(io)) - (io.append ? io.size : io.ptr - 1)
+ if existing_space < nshort % Int
+ # Outline this function to make it more likely that ensureroom inlines itself
+ return ensureroom_slowpath(io, nshort, existing_space)
end
- copyto!(io.data, 1, io.data, ptr, bytes_to_move)
- io.size -= ptr - 1
- io.ptr -= ptr - 1
- io.offset = 0
- return
+ return io
end
-@noinline function ensureroom_slowpath(io::GenericIOBuffer, nshort::UInt)
+# Throw error (placed in this function to outline it) or reinit the buffer
+@noinline function ensureroom_reallocate(io::GenericIOBuffer, nshort::UInt)
io.writable || throw(ArgumentError("ensureroom failed, IOBuffer is not writeable"))
- if io.reinit
- io.data = _similar_data(io, nshort % Int)
- io.reinit = false
- end
- if !io.seekable
- if !ismarked(io) && io.ptr > io.offset+1 && io.size <= io.ptr - 1
- io.ptr = 1
- io.size = 0
- io.offset = 0
- else
- datastart = (ismarked(io) ? io.mark : io.ptr - io.offset)
- if (io.size-io.offset+nshort > io.maxsize) ||
- (datastart > 4096 && datastart > io.size - io.ptr) ||
- (datastart > 262144)
- # apply somewhat arbitrary heuristics to decide when to destroy
- # old, read data to make more room for new data
- compact(io)
- end
+ io.data = _similar_data(io, min(io.maxsize, nshort % Int))
+ io.reinit = false
+ io.offset_or_compacted = -get_compacted(io)
+ return io
+end
+
+# Here, we already know there is not enough room at the end of the io's data.
+@noinline function ensureroom_slowpath(io::GenericIOBuffer, nshort::UInt, available_bytes::Int)
+ reclaimable_bytes = first(get_used_span(io)) - 1
+ # Avoid resizing and instead compact the buffer, only if we gain enough bytes from
+ # doing so (at least 32 bytes and 1/8th of the data length). Also, if we would have
+ # to resize anyway, there would be no point in compacting, so also check that.
+ if (
+ reclaimable_bytes ≥ 32 &&
+ reclaimable_bytes ≥ length(io.data) >>> 3 &&
+ (reclaimable_bytes + available_bytes) % UInt ≥ nshort
+ )
+ compact!(io)
+ return io
+ end
+
+ desired_size = length(io.data) + Int(nshort) - available_bytes
+ if desired_size > io.maxsize
+ # If we can't fit all the requested data in the new buffer, we need to
+ # fit as much as possible, so we must compact
+ if !iszero(reclaimable_bytes)
+ desired_size -= compact!(io)
+ end
+ # Grow the buffer all the way to maxsize if it is not already that large
+ if length(io.data) < io.maxsize
+ _resize!(io, io.maxsize, true)
end
+ else
+ # Else, we request only the requested size, but set `exact` to `false`,
+ # in order to overallocate to avoid growing the buffer by too little
+ _resize!(io, desired_size, false)
end
- return
+
+ return io
end
-@inline ensureroom(io::GenericIOBuffer, nshort::Int) = ensureroom(io, UInt(nshort))
-@inline function ensureroom(io::GenericIOBuffer, nshort::UInt)
- if !io.writable || (!io.seekable && io.ptr > io.offset+1) || io.reinit
- ensureroom_slowpath(io, nshort)
- end
- n = min((nshort % Int) + (io.append ? io.size : io.ptr-1) - io.offset, io.maxsize)
- l = length(io.data) + io.offset
- if n > l
- _resize!(io, Int(n))
+# Get the indices in data which cannot be deleted
+function get_used_span(io::IOBuffer)
+ # A seekable buffer can recover data before ptr
+ return if io.seekable
+ get_offset(io) + 1 : io.size
+ # If non-seekable, the mark can be used to recover data before ptr,
+ # so data at the mark and after must also be saved
+ elseif io.mark > -1
+ min(io.ptr, translate_seek_position(io, io.mark)) : io.size
+ else
+ io.ptr : io.size
end
- return io
+end
+
+# Delete any offset, and also compact data if buffer is not seekable.
+# Return the number of bytes deleted
+function compact!(io::GenericIOBuffer)::Int
+ offset = get_offset(io)
+ used_span = get_used_span(io)
+ deleted = first(used_span) - 1
+ compacted = deleted - offset
+ iszero(deleted) && return 0
+ data = io.data
+ copyto!(data, 1, data, deleted + 1, length(used_span))
+ io.offset_or_compacted = -get_compacted(io) - compacted
+ io.ptr -= deleted
+ io.size -= deleted
+ return deleted
end
eof(io::GenericIOBuffer) = (io.ptr - 1 >= io.size)
@@ -439,17 +694,17 @@ function closewrite(io::GenericIOBuffer)
end
@noinline function close(io::GenericIOBuffer{T}) where T
+ if io.writable && !io.reinit
+ _resize!(io, 0, true)
+ end
io.readable = false
io.writable = false
io.seekable = false
io.size = 0
- io.offset = 0
io.maxsize = 0
io.ptr = 1
io.mark = -1
- if io.writable && !io.reinit
- io.data = _resize!(io, 0)
- end
+ io.offset_or_compacted = -get_compacted(io)
nothing
end
@@ -472,31 +727,42 @@ julia> String(take!(io))
```
"""
function take!(io::GenericIOBuffer)
- ismarked(io) && unmark(io)
+ io.mark = -1
if io.seekable
- nbytes = io.size - io.offset
- data = copyto!(StringVector(nbytes), 1, io.data, io.offset + 1, nbytes)
+ # If the buffer is seekable, then the previously consumed bytes from ptr+1:size
+ # must still be output, as they are not truly gone.
+ # Hence, we output all bytes from 1:io.size
+ offset = get_offset(io)
+ nbytes = io.size - offset
+ data = copyto!(StringVector(nbytes), 1, io.data, offset + 1, nbytes)
else
+ # Else, if not seekable, bytes from 1:ptr-1 are truly gone and should not
+ # be output. Hence, we output `bytesavailable`, which is ptr:size
nbytes = bytesavailable(io)
data = read!(io, StringVector(nbytes))
end
if io.writable
+ io.reinit = true
io.ptr = 1
io.size = 0
- io.offset = 0
+ io.offset_or_compacted = 0
end
return data
end
+
+# This method is specialized because we know the underlying data is a Memory, so we can
+# e.g. wrap directly in an array without copying. Otherwise the logic is the same as
+# the generic method
function take!(io::IOBuffer)
- ismarked(io) && unmark(io)
+ io.mark = -1
if io.seekable
nbytes = filesize(io)
if nbytes == 0 || io.reinit
data = StringVector(0)
elseif io.writable
- data = wrap(Array, memoryref(io.data, io.offset + 1), nbytes)
+ data = wrap(Array, memoryref(io.data, get_offset(io) + 1), nbytes)
else
- data = copyto!(StringVector(nbytes), 1, io.data, io.offset + 1, nbytes)
+ data = copyto!(StringVector(nbytes), 1, io.data, get_offset(io) + 1, nbytes)
end
else
nbytes = bytesavailable(io)
@@ -512,7 +778,7 @@ function take!(io::IOBuffer)
io.reinit = true
io.ptr = 1
io.size = 0
- io.offset = 0
+ io.offset_or_compacted = 0
end
return data
end
@@ -529,46 +795,79 @@ state. This should only be used internally for performance-critical
It might save an allocation compared to `take!` (if the compiler elides the
Array allocation), as well as omits some checks.
"""
-_unsafe_take!(io::IOBuffer) =
- wrap(Array, io.size == io.offset ?
- memoryref(Memory{UInt8}()) :
- memoryref(io.data, io.offset + 1),
- io.size - io.offset)
+function _unsafe_take!(io::IOBuffer)
+ offset = get_offset(io)
+ mem = if io.size == offset
+ memoryref(Memory{UInt8}())
+ else
+ memoryref(io.data, offset + 1)
+ end
+ wrap(Array, mem, io.size - offset)
+end
function write(to::IO, from::GenericIOBuffer)
- written::Int = bytesavailable(from)
+ # This would cause an infinite loop, as it should read until the end, but more
+ # data is being written into it continuously.
if to === from
- from.ptr = from.size + 1
+ throw(ArgumentError("Writing all content from an IOBuffer into itself is invalid"))
else
- written = GC.@preserve from unsafe_write(to, pointer(from.data, from.ptr), UInt(written))
- from.ptr += written
+ available = bytesavailable(from)
+ written = GC.@preserve from unsafe_write(to, pointer(from.data, from.ptr), UInt(available))
+ from.ptr = from.size + 1
end
return written
end
function unsafe_write(to::GenericIOBuffer, p::Ptr{UInt8}, nb::UInt)
ensureroom(to, nb)
- ptr = (to.append ? to.size+1 : to.ptr)
- written = Int(min(nb, Int(length(to.data))::Int - ptr + 1))
- towrite = written
- d = to.data
- while towrite > 0
- @inbounds d[ptr] = unsafe_load(p)
- ptr += 1
+ size = to.size
+ append = to.append
+ ptr = append ? size+1 : to.ptr
+ data = to.data
+ to_write = min(nb, (min(Int(length(data))::Int, to.maxsize + get_offset(to)) - ptr + 1) % UInt) % Int
+ # Dispatch based on the type of data, to possibly allow using memcpy
+ _unsafe_write(data, p, ptr, to_write % UInt)
+ # Update to.size only if the ptr has advanced to higher than
+ # the previous size. Otherwise, we just overwrote existing data
+ to.size = max(size, ptr + to_write - 1)
+ # If to.append, we only update size, not ptr.
+ if !append
+ to.ptr = ptr + to_write
+ end
+ return to_write
+end
+
+@inline function _unsafe_write(data::AbstractVector{UInt8}, p::Ptr{UInt8}, from::Int, nb::UInt)
+ for i in 0:nb-1
+ data[from + i] = unsafe_load(p)
p += 1
- towrite -= 1
end
- to.size = max(to.size, ptr - 1)
- if !to.append
- to.ptr += written
+end
+
+@inline function _unsafe_write(data::MutableDenseArrayType{UInt8}, p::Ptr{UInt8}, from::Int, nb::UInt)
+ # Calling `unsafe_copyto!` is very efficient for large arrays, but has some overhead
+ # for small (< 5 bytes) arrays.
+ # Since a common use case of IOBuffer is to construct strings incrementally, often
+ # one char at a time, it's crucial to be fast in the case of small arrays.
+ # This optimization only gives a minor 10% speed boost in the best case.
+ if nb < 5
+ @inbounds for i in UInt(1):nb
+ data[from + (i % Int) - 1] = unsafe_load(p, i)
+ end
+ else
+ GC.@preserve data begin
+ ptr = Ptr{UInt8}(pointer(data, from))::Ptr{UInt8}
+ @inline unsafe_copyto!(ptr, p, nb)
+ end
end
- return written
end
@inline function write(to::GenericIOBuffer, a::UInt8)
ensureroom(to, UInt(1))
ptr = (to.append ? to.size+1 : to.ptr)
- if ptr > to.maxsize
+ # We have just ensured there is room for 1 byte, EXCEPT if we were to exceed
+ # maxsize. So, we just need to check that here.
+ if ptr > to.maxsize + get_offset(to)
return 0
else
to.data[ptr] = a
@@ -581,31 +880,26 @@ end
end
readbytes!(io::GenericIOBuffer, b::MutableDenseArrayType{UInt8}, nb=length(b)) = readbytes!(io, b, Int(nb))
+
function readbytes!(io::GenericIOBuffer, b::MutableDenseArrayType{UInt8}, nb::Int)
- nr = min(nb, bytesavailable(io))
- if length(b) < nr
- resize!(b, nr)
+ io.readable || _throw_not_readable()
+ to_read = min(nb, bytesavailable(io))
+ if length(b) < to_read
+ resize!(b, to_read)
end
- read_sub(io, b, 1, nr)
- return nr
+ checkbounds(b, 1:to_read)
+ GC.@preserve b unsafe_read(io, pointer(b), to_read)
+ to_read
end
read(io::GenericIOBuffer) = read!(io, StringVector(bytesavailable(io)))
+
+# For IO buffers, all the data is immediately available.
readavailable(io::GenericIOBuffer) = read(io)
-read(io::GenericIOBuffer, nb::Integer) = read!(io, StringVector(min(nb, bytesavailable(io))))
-function occursin(delim::UInt8, buf::IOBuffer)
- p = pointer(buf.data, buf.ptr)
- q = GC.@preserve buf ccall(:memchr, Ptr{UInt8}, (Ptr{UInt8}, Int32, Csize_t), p, delim, bytesavailable(buf))
- return q != C_NULL
-end
+read(io::GenericIOBuffer, nb::Integer) = read!(io, StringVector(min(nb, bytesavailable(io))))
function occursin(delim::UInt8, buf::GenericIOBuffer)
- data = buf.data
- for i = buf.ptr:buf.size
- @inbounds b = data[i]
- b == delim && return true
- end
- return false
+ return in(delim, view(buf.data, buf.ptr:buf.size))
end
function copyuntil(out::IO, io::GenericIOBuffer, delim::UInt8; keep::Bool=false)
@@ -622,28 +916,53 @@ function copyuntil(out::IO, io::GenericIOBuffer, delim::UInt8; keep::Bool=false)
end
function copyline(out::GenericIOBuffer, s::IO; keep::Bool=false)
- copyuntil(out, s, 0x0a, keep=true)
- line = out.data
- i = out.size # XXX: this is only correct for appended data. if the data was inserted, only ptr should change
- if keep || i == out.offset || line[i] != 0x0a
+ # If the data is copied into the middle of the buffer of `out` instead of appended to the end,
+ # and !keep, and the line copied ends with \r\n, then the copyuntil (even if keep=false)
+ # will overwrite one too many bytes with the new \r byte.
+ # Work around this by making a new temporary buffer.
+ # Could perhaps be done better
+ if !out.append && out.ptr < out.size + 1
+ newbuf = IOBuffer()
+ copyuntil(newbuf, s, 0x0a, keep=true)
+ v = take!(newbuf)
+ # Remove \r\n or \n if present
+ if !keep
+ if length(v) > 1 && last(v) == UInt8('\n')
+ pop!(v)
+ end
+ if length(v) > 1 && last(v) == UInt8('\r')
+ pop!(v)
+ end
+ end
+ write(out, v)
return out
- elseif i < 2 || line[i-1] != 0x0d
- i -= 1
else
- i -= 2
- end
- out.size = i
- if !out.append
- out.ptr = i+1
+ # Else, we can just copy the data directly into the buffer, and then
+ # subtract the last one or two bytes depending on `keep`.
+ copyuntil(out, s, 0x0a, keep=true)
+ line = out.data
+ i = out.size
+ if keep || i == out.offset_or_compacted || line[i] != 0x0a
+ return out
+ elseif i < 2 || line[i-1] != 0x0d
+ i -= 1
+ else
+ i -= 2
+ end
+ out.size = i
+ if !out.append
+ out.ptr = i+1
+ end
+ return out
end
- return out
end
function _copyline(out::IO, io::GenericIOBuffer; keep::Bool=false)
data = view(io.data, io.ptr:io.size)
# note: findfirst + copyto! is much faster than a single loop
# except for nout ≲ 20. A single loop is 2x faster for nout=5.
- nout = nread = something(findfirst(==(0x0a), data), length(data))
+ nout = nread = something(findfirst(==(0x0a), data), length(data))::Int
+ # Remove the 0x0a (newline) if not keep, and also remove the 0x0d (\r) if it is there
if !keep && nout > 0 && data[nout] == 0x0a
nout -= 1
nout > 0 && data[nout] == 0x0d && (nout -= 1)
@@ -652,6 +971,7 @@ function _copyline(out::IO, io::GenericIOBuffer; keep::Bool=false)
io.ptr += nread
return out
end
+
copyline(out::IO, io::GenericIOBuffer; keep::Bool=false) = _copyline(out, io; keep)
copyline(out::GenericIOBuffer, io::GenericIOBuffer; keep::Bool=false) = _copyline(out, io; keep)
diff --git a/base/irrationals.jl b/base/irrationals.jl
index 76222997865c0..f86b55e4faaa7 100644
--- a/base/irrationals.jl
+++ b/base/irrationals.jl
@@ -60,20 +60,40 @@ AbstractFloat(x::AbstractIrrational) = Float64(x)::Float64
Float16(x::AbstractIrrational) = Float16(Float32(x)::Float32)
Complex{T}(x::AbstractIrrational) where {T<:Real} = Complex{T}(T(x))
-function _irrational_to_rational(::Type{T}, x::AbstractIrrational) where T<:Integer
- o = precision(BigFloat)
+function _irrational_to_rational_at_current_precision(::Type{T}, x::AbstractIrrational) where {T <: Integer}
+ bx = BigFloat(x)
+ r = rationalize(T, bx, tol = 0)
+ if abs(BigFloat(r) - bx) > eps(bx)
+ r
+ else
+ nothing # Error is too small, repeat with greater precision.
+ end
+end
+function _irrational_to_rational_at_precision(::Type{T}, x::AbstractIrrational, p::Int) where {T <: Integer}
+ f = let x = x
+ () -> _irrational_to_rational_at_current_precision(T, x)
+ end
+ setprecision(f, BigFloat, p)
+end
+function _irrational_to_rational_at_current_rounding_mode(::Type{T}, x::AbstractIrrational) where {T <: Integer}
+ if T <: BigInt
+ _throw_argument_error_irrational_to_rational_bigint() # avoid infinite loop
+ end
p = 256
while true
- setprecision(BigFloat, p)
- bx = BigFloat(x)
- r = rationalize(T, bx, tol=0)
- if abs(BigFloat(r) - bx) > eps(bx)
- setprecision(BigFloat, o)
+ r = _irrational_to_rational_at_precision(T, x, p)
+ if r isa Number
return r
end
p += 32
end
end
+function _irrational_to_rational(::Type{T}, x::AbstractIrrational) where {T <: Integer}
+ f = let x = x
+ () -> _irrational_to_rational_at_current_rounding_mode(T, x)
+ end
+ setrounding(f, BigFloat, RoundNearest)
+end
Rational{T}(x::AbstractIrrational) where {T<:Integer} = _irrational_to_rational(T, x)
_throw_argument_error_irrational_to_rational_bigint() = throw(ArgumentError("Cannot convert an AbstractIrrational to a Rational{BigInt}: use rationalize(BigInt, x) instead"))
Rational{BigInt}(::AbstractIrrational) = _throw_argument_error_irrational_to_rational_bigint()
diff --git a/base/iterators.jl b/base/iterators.jl
index d6367ed8d996e..c7450781c4928 100644
--- a/base/iterators.jl
+++ b/base/iterators.jl
@@ -17,6 +17,7 @@ using .Base:
any, _counttuple, eachindex, ntuple, zero, prod, reduce, in, firstindex, lastindex,
tail, fieldtypes, min, max, minimum, zero, oneunit, promote, promote_shape, LazyString,
afoldl
+using Core
using Core: @doc
using .Base:
diff --git a/base/libc.jl b/base/libc.jl
index 7364f6e6677fe..fc0cc774cab7a 100644
--- a/base/libc.jl
+++ b/base/libc.jl
@@ -288,6 +288,8 @@ time(tm::TmStruct) = Float64(ccall(:mktime, Int, (Ref{TmStruct},), tm))
time() -> Float64
Get the system time in seconds since the epoch, with fairly high (typically, microsecond) resolution.
+
+See also [`time_ns`](@ref).
"""
time() = ccall(:jl_clock_now, Float64, ())
diff --git a/base/libdl.jl b/base/libdl.jl
index 199d847572ca4..de8c6a7a597c5 100644
--- a/base/libdl.jl
+++ b/base/libdl.jl
@@ -330,11 +330,10 @@ libfoo = LazyLibrary(LazyLibraryPath(prefix, "lib/libfoo.so.1.2.3"))
```
"""
struct LazyLibraryPath
- pieces::Vector
- LazyLibraryPath(pieces::Vector) = new(pieces)
+ pieces::Tuple{Vararg{Any}}
+ LazyLibraryPath(pieces...) = new(pieces)
end
-LazyLibraryPath(args...) = LazyLibraryPath(collect(args))
-Base.string(llp::LazyLibraryPath) = joinpath(string.(llp.pieces)...)::String
+Base.string(llp::LazyLibraryPath) = joinpath(String[string(p) for p in llp.pieces])
Base.cconvert(::Type{Cstring}, llp::LazyLibraryPath) = Base.cconvert(Cstring, string(llp))
# Define `print` so that we can wrap this in a `LazyString`
Base.print(io::IO, llp::LazyLibraryPath) = print(io, string(llp))
diff --git a/base/libuv.jl b/base/libuv.jl
index 306854e9f4436..35b1a9097293e 100644
--- a/base/libuv.jl
+++ b/base/libuv.jl
@@ -103,7 +103,8 @@ struverror(err::Int32) = unsafe_string(ccall(:uv_strerror, Cstring, (Int32,), er
uverrorname(err::Int32) = unsafe_string(ccall(:uv_err_name, Cstring, (Int32,), err))
uv_error(prefix::Symbol, c::Integer) = uv_error(string(prefix), c)
-uv_error(prefix::AbstractString, c::Integer) = c < 0 ? throw(_UVError(prefix, c)) : nothing
+uv_error(prefix::AbstractString, c::Integer) = c < 0 ? _uv_error(prefix, c) : nothing
+_uv_error(prefix::AbstractString, c::Integer) = throw(_UVError(prefix, c))
## event loop ##
diff --git a/base/linking.jl b/base/linking.jl
index 953d80c82cc42..f3dbe6abba3ec 100644
--- a/base/linking.jl
+++ b/base/linking.jl
@@ -3,15 +3,8 @@ module Linking
import Base.Libc: Libdl
-# inlined LLD_jll
-# These get calculated in __init__()
-const PATH = Ref("")
-const LIBPATH = Ref("")
-const PATH_list = String[]
-const LIBPATH_list = String[]
-const lld_path = Ref{String}()
+# from LLD_jll
const lld_exe = Sys.iswindows() ? "lld.exe" : "lld"
-const dsymutil_path = Ref{String}()
const dsymutil_exe = Sys.iswindows() ? "dsymutil.exe" : "dsymutil"
if Sys.iswindows()
@@ -47,61 +40,51 @@ function adjust_ENV!(env::Dict, PATH::String, LIBPATH::String, adjust_PATH::Bool
return env
end
-function __init_lld_path()
+const lld_path = OncePerProcess{String}() do
# Prefer our own bundled lld, but if we don't have one, pick it up off of the PATH
# If this is an in-tree build, `lld` will live in `tools`. Otherwise, it'll be in `private_libexecdir`
for bundled_lld_path in (joinpath(Sys.BINDIR, Base.PRIVATE_LIBEXECDIR, lld_exe),
joinpath(Sys.BINDIR, "..", "tools", lld_exe),
joinpath(Sys.BINDIR, lld_exe))
if isfile(bundled_lld_path)
- lld_path[] = abspath(bundled_lld_path)
- return
+ return abspath(bundled_lld_path)
end
end
- lld_path[] = something(Sys.which(lld_exe), lld_exe)
- return
+ return something(Sys.which(lld_exe), lld_exe)
end
-function __init_dsymutil_path()
- #Same as with lld but for dsymutil
+const dsymutil_path = OncePerProcess{String}() do
+ # Same as with lld but for dsymutil
for bundled_dsymutil_path in (joinpath(Sys.BINDIR, Base.PRIVATE_LIBEXECDIR, dsymutil_exe),
joinpath(Sys.BINDIR, "..", "tools", dsymutil_exe),
joinpath(Sys.BINDIR, dsymutil_exe))
if isfile(bundled_dsymutil_path)
- dsymutil_path[] = abspath(bundled_dsymutil_path)
- return
+ return abspath(bundled_dsymutil_path)
end
end
- dsymutil_path[] = something(Sys.which(dsymutil_exe), dsymutil_exe)
- return
+ return something(Sys.which(dsymutil_exe), dsymutil_exe)
end
-const VERBOSE = Ref{Bool}(false)
+PATH() = dirname(lld_path())
-function __init__()
- VERBOSE[] = something(Base.get_bool_env("JULIA_VERBOSE_LINKING", false), false)
-
- __init_lld_path()
- __init_dsymutil_path()
- PATH[] = dirname(lld_path[])
+const LIBPATH = OncePerProcess{String}() do
if Sys.iswindows()
# On windows, the dynamic libraries (.dll) are in Sys.BINDIR ("usr\\bin")
- append!(LIBPATH_list, [abspath(Sys.BINDIR, Base.LIBDIR, "julia"), Sys.BINDIR])
+ LIBPATH_list = [abspath(Sys.BINDIR, Base.LIBDIR, "julia"), Sys.BINDIR]
else
- append!(LIBPATH_list, [abspath(Sys.BINDIR, Base.LIBDIR, "julia"), abspath(Sys.BINDIR, Base.LIBDIR)])
+ LIBPATH_list = [abspath(Sys.BINDIR, Base.LIBDIR, "julia"), abspath(Sys.BINDIR, Base.LIBDIR)]
end
- LIBPATH[] = join(LIBPATH_list, pathsep)
- return
+ return join(LIBPATH_list, pathsep)
end
function lld(; adjust_PATH::Bool = true, adjust_LIBPATH::Bool = true)
- env = adjust_ENV!(copy(ENV), PATH[], LIBPATH[], adjust_PATH, adjust_LIBPATH)
- return Cmd(Cmd([lld_path[]]); env)
+ env = adjust_ENV!(copy(ENV), PATH(), LIBPATH(), adjust_PATH, adjust_LIBPATH)
+ return Cmd(Cmd([lld_path()]); env)
end
function dsymutil(; adjust_PATH::Bool = true, adjust_LIBPATH::Bool = true)
- env = adjust_ENV!(copy(ENV), PATH[], LIBPATH[], adjust_PATH, adjust_LIBPATH)
- return Cmd(Cmd([dsymutil_path[]]); env)
+ env = adjust_ENV!(copy(ENV), PATH(), LIBPATH(), adjust_PATH, adjust_LIBPATH)
+ return Cmd(Cmd([dsymutil_path()]); env)
end
function ld()
@@ -149,6 +132,8 @@ else
shlibdir() = libdir()
end
+verbose_linking() = something(Base.get_bool_env("JULIA_VERBOSE_LINKING", false), false)
+
function link_image_cmd(path, out)
PRIVATE_LIBDIR = "-L$(private_libdir())"
SHLIBDIR = "-L$(shlibdir())"
@@ -158,7 +143,7 @@ function link_image_cmd(path, out)
LIBS = (LIBS..., "-lopenlibm", "-lssp", "-lgcc_s", "-lgcc", "-lmsvcrt")
end
- V = VERBOSE[] ? "--verbose" : ""
+ V = verbose_linking() ? "--verbose" : ""
`$(ld()) $V $SHARED -o $out $WHOLE_ARCHIVE $path $NO_WHOLE_ARCHIVE $PRIVATE_LIBDIR $SHLIBDIR $LIBS`
end
diff --git a/base/loading.jl b/base/loading.jl
index 47741da26a1b2..ea2cf35395540 100644
--- a/base/loading.jl
+++ b/base/loading.jl
@@ -1243,21 +1243,7 @@ const TIMING_IMPORTS = Threads.Atomic{Int}(0)
# these return either the array of modules loaded from the path / content given
# or an Exception that describes why it couldn't be loaded
# and it reconnects the Base.Docs.META
-function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{Nothing, String}, depmods::Vector{Any}, ignore_native::Union{Nothing,Bool}=nothing; register::Bool=true)
- if isnothing(ignore_native)
- if JLOptions().code_coverage == 0 && JLOptions().malloc_log == 0
- ignore_native = false
- else
- io = open(path, "r")
- try
- iszero(isvalid_cache_header(io)) && return ArgumentError("Incompatible header in cache file $path.")
- _, (includes, _, _), _, _, _, _, _, _ = parse_cache_header(io, path)
- ignore_native = pkg_tracked(includes)
- finally
- close(io)
- end
- end
- end
+function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{Nothing, String}, depmods::Vector{Any}; register::Bool=true)
assert_havelock(require_lock)
timing_imports = TIMING_IMPORTS[] > 0
try
@@ -1276,6 +1262,7 @@ function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{No
depmods[i] = dep
end
+ ignore_native = false
unlock(require_lock) # temporarily _unlock_ during these operations
sv = try
if ocachepath !== nothing
@@ -1438,7 +1425,6 @@ function run_module_init(mod::Module, i::Int=1)
end
function run_package_callbacks(modkey::PkgId)
- @assert modkey != precompilation_target
run_extension_callbacks(modkey)
assert_havelock(require_lock)
unlock(require_lock)
@@ -1568,7 +1554,7 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any}
uuid_trigger = UUID(totaldeps[trigger]::String)
trigger_id = PkgId(uuid_trigger, trigger)
push!(trigger_ids, trigger_id)
- if !haskey(Base.loaded_modules, trigger_id) || haskey(package_locks, trigger_id) || (trigger_id == precompilation_target)
+ if !haskey(Base.loaded_modules, trigger_id) || haskey(package_locks, trigger_id)
trigger1 = get!(Vector{ExtensionId}, EXT_DORMITORY, trigger_id)
push!(trigger1, gid)
else
@@ -1581,7 +1567,6 @@ end
loading_extension::Bool = false
loadable_extensions::Union{Nothing,Vector{PkgId}} = nothing
precompiling_extension::Bool = false
-precompilation_target::Union{Nothing,PkgId} = nothing
function run_extension_callbacks(extid::ExtensionId)
assert_havelock(require_lock)
succeeded = try
@@ -1947,44 +1932,16 @@ function _tryrequire_from_serialized(modkey::PkgId, build_id::UInt128)
return ErrorException("Required dependency $modkey failed to load from a cache file.")
end
-# returns whether the package is tracked in coverage or malloc tracking based on
-# JLOptions and includes
-function pkg_tracked(includes)
- if JLOptions().code_coverage == 0 && JLOptions().malloc_log == 0
- return false
- elseif JLOptions().code_coverage == 1 || JLOptions().malloc_log == 1 # user
- # Just say true. Pkgimages aren't in Base
- return true
- elseif JLOptions().code_coverage == 2 || JLOptions().malloc_log == 2 # all
- return true
- elseif JLOptions().code_coverage == 3 || JLOptions().malloc_log == 3 # tracked path
- if JLOptions().tracked_path == C_NULL
- return false
- else
- tracked_path = unsafe_string(JLOptions().tracked_path)
- if isempty(tracked_path)
- return false
- else
- return any(includes) do inc
- startswith(inc.filename, tracked_path)
- end
- end
- end
- end
-end
-
# loads a precompile cache file, ignoring stale_cachefile tests
# load all dependent modules first
function _tryrequire_from_serialized(pkg::PkgId, path::String, ocachepath::Union{Nothing, String})
assert_havelock(require_lock)
local depmodnames
io = open(path, "r")
- ignore_native = false
try
iszero(isvalid_cache_header(io)) && return ArgumentError("Incompatible header in cache file $path.")
_, (includes, _, _), depmodnames, _, _, _, clone_targets, _ = parse_cache_header(io, path)
- ignore_native = pkg_tracked(includes)
pkgimage = !isempty(clone_targets)
if pkgimage
@@ -2011,7 +1968,7 @@ function _tryrequire_from_serialized(pkg::PkgId, path::String, ocachepath::Union
depmods[i] = dep
end
# then load the file
- loaded = _include_from_serialized(pkg, path, ocachepath, depmods, ignore_native; register = true)
+ loaded = _include_from_serialized(pkg, path, ocachepath, depmods; register = true)
return loaded
end
@@ -2178,7 +2135,6 @@ function canstart_loading(modkey::PkgId, build_id::UInt128, stalecheck::Bool)
# load already in progress for this module on the task
task, cond = loading
deps = String[modkey.name]
- pkgid = modkey
assert_havelock(cond.lock)
if debug_loading_deadlocks && current_task() !== task
waiters = Dict{Task,Pair{Task,PkgId}}() # invert to track waiting tasks => loading tasks
@@ -2198,18 +2154,26 @@ function canstart_loading(modkey::PkgId, build_id::UInt128, stalecheck::Bool)
end
end
if current_task() === task
- others = String[modkey.name] # repeat this to emphasize the cycle here
+ push!(deps, modkey.name) # repeat this to emphasize the cycle here
+ others = Set{String}()
for each in package_locks # list the rest of the packages being loaded too
if each[2][1] === task
other = each[1].name
- other == modkey.name || other == pkgid.name || push!(others, other)
+ other == modkey.name || push!(others, other)
end
end
+ # remove duplicates from others already in deps
+ for dep in deps
+ delete!(others, dep)
+ end
msg = sprint(deps, others) do io, deps, others
print(io, "deadlock detected in loading ")
- join(io, deps, " -> ")
- print(io, " -> ")
- join(io, others, " && ")
+ join(io, deps, " using ")
+ if !isempty(others)
+ print(io, " (while loading ")
+ join(io, others, " and ")
+ print(io, ")")
+ end
end
throw(ConcurrencyViolationError(msg))
end
@@ -2383,6 +2347,10 @@ function __require(into::Module, mod::Symbol)
error("`using/import $mod` outside of a Module detected. Importing a package outside of a module \
is not allowed during package precompilation.")
end
+ topmod = moduleroot(into)
+ if nameof(topmod) === mod
+ return topmod
+ end
@lock require_lock begin
LOADING_CACHE[] = LoadingCache()
try
@@ -2491,10 +2459,7 @@ function _require_prelocked(uuidkey::PkgId, env=nothing)
try
toplevel_load[] = false
m = __require_prelocked(uuidkey, env)
- if m === nothing
- error("package `$(uuidkey.name)` did not define the expected \
- module `$(uuidkey.name)`, check for typos in package module name")
- end
+ m isa Module || check_package_module_loaded_error(uuidkey)
finally
toplevel_load[] = last
end_loading(uuidkey, m)
@@ -2984,6 +2949,9 @@ const newly_inferred = CodeInstance[]
function include_package_for_output(pkg::PkgId, input::String, depot_path::Vector{String}, dl_load_path::Vector{String}, load_path::Vector{String},
concrete_deps::typeof(_concrete_dependencies), source::Union{Nothing,String})
+ @lock require_lock begin
+ m = start_loading(pkg, UInt128(0), false)
+ @assert m === nothing
append!(empty!(Base.DEPOT_PATH), depot_path)
append!(empty!(Base.DL_LOAD_PATH), dl_load_path)
append!(empty!(Base.LOAD_PATH), load_path)
@@ -2992,6 +2960,8 @@ function include_package_for_output(pkg::PkgId, input::String, depot_path::Vecto
Base._track_dependencies[] = true
get!(Base.PkgOrigin, Base.pkgorigins, pkg).path = input
append!(empty!(Base._concrete_dependencies), concrete_deps)
+ end
+
uuid_tuple = pkg.uuid === nothing ? (UInt64(0), UInt64(0)) : convert(NTuple{2, UInt64}, pkg.uuid)
ccall(:jl_set_module_uuid, Cvoid, (Any, NTuple{2, UInt64}), Base.__toplevel__, uuid_tuple)
@@ -3010,21 +2980,22 @@ function include_package_for_output(pkg::PkgId, input::String, depot_path::Vecto
ccall(:jl_set_newly_inferred, Cvoid, (Any,), nothing)
end
# check that the package defined the expected module so we can give a nice error message if not
- Base.check_package_module_loaded(pkg)
+ m = maybe_root_module(pkg)
+ m isa Module || check_package_module_loaded_error(pkg)
# Re-populate the runtime's newly-inferred array, which will be included
# in the output. We removed it above to avoid including any code we may
# have compiled for error handling and validation.
ccall(:jl_set_newly_inferred, Cvoid, (Any,), newly_inferred)
+ @lock require_lock end_loading(pkg, m)
+ # insert_extension_triggers(pkg)
+ # run_package_callbacks(pkg)
end
-function check_package_module_loaded(pkg::PkgId)
- if !haskey(Base.loaded_modules, pkg)
- # match compilecache error type for non-125 errors
- error("$(repr("text/plain", pkg)) did not define the expected module `$(pkg.name)`, \
- check for typos in package module name")
- end
- return nothing
+function check_package_module_loaded_error(pkg)
+ # match compilecache error type for non-125 errors
+ error("package `$(pkg.name)` did not define the expected \
+ module `$(pkg.name)`, check for typos in package module name")
end
# protects against PkgId and UUID being imported and losing Base prefix
@@ -3100,7 +3071,6 @@ function create_expr_cache(pkg::PkgId, input::String, output::String, output_o::
Base.track_nested_precomp($(_pkg_str(vcat(Base.precompilation_stack, pkg))))
Base.loadable_extensions = $(_pkg_str(loadable_exts))
Base.precompiling_extension = $(loading_extension)
- Base.precompilation_target = $(_pkg_str(pkg))
Base.include_package_for_output($(_pkg_str(pkg)), $(repr(abspath(input))), $(repr(depot_path)), $(repr(dl_load_path)),
$(repr(load_path)), $(_pkg_str(concrete_deps)), $(repr(source_path(nothing))))
""")
@@ -3157,11 +3127,11 @@ This can be used to reduce package load times. Cache files are stored in
`DEPOT_PATH[1]/compiled`. See [Module initialization and precompilation](@ref)
for important notes.
"""
-function compilecache(pkg::PkgId, internal_stderr::IO = stderr, internal_stdout::IO = stdout; flags::Cmd=``, reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}(), loadable_exts::Union{Vector{PkgId},Nothing}=nothing)
+function compilecache(pkg::PkgId, internal_stderr::IO = stderr, internal_stdout::IO = stdout; flags::Cmd=``, cacheflags::CacheFlags=CacheFlags(), reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}(), loadable_exts::Union{Vector{PkgId},Nothing}=nothing)
@nospecialize internal_stderr internal_stdout
path = locate_package(pkg)
path === nothing && throw(ArgumentError("$(repr("text/plain", pkg)) not found during precompilation"))
- return compilecache(pkg, path, internal_stderr, internal_stdout; flags, reasons, loadable_exts)
+ return compilecache(pkg, path, internal_stderr, internal_stdout; flags, cacheflags, reasons, loadable_exts)
end
const MAX_NUM_PRECOMPILE_FILES = Ref(10)
diff --git a/base/lock.jl b/base/lock.jl
index 59e554c01c24a..b6d633d7907a2 100644
--- a/base/lock.jl
+++ b/base/lock.jl
@@ -252,7 +252,7 @@ function wait_no_relock(c::GenericCondition)
try
return wait()
catch
- ct.queue === nothing || list_deletefirst!(ct.queue, ct)
+ ct.queue === nothing || list_deletefirst!(ct.queue::IntrusiveLinkedList{Task}, ct)
rethrow()
end
end
@@ -693,7 +693,7 @@ julia> procstate === fetch(@async global_state())
true
```
"""
-mutable struct OncePerProcess{T, F}
+mutable struct OncePerProcess{T, F} <: Function
value::Union{Nothing,T}
@atomic state::UInt8 # 0=initial, 1=hasrun, 2=error
@atomic allow_compile_time::Bool
@@ -702,25 +702,27 @@ mutable struct OncePerProcess{T, F}
function OncePerProcess{T,F}(initializer::F) where {T, F}
once = new{T,F}(nothing, PerStateInitial, true, initializer, ReentrantLock())
- ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any),
- once, :value, nothing)
- ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any),
- once, :state, PerStateInitial)
return once
end
end
+OncePerProcess{T}(initializer::Type{U}) where {T, U} = OncePerProcess{T, Type{U}}(initializer)
OncePerProcess{T}(initializer::F) where {T, F} = OncePerProcess{T, F}(initializer)
+OncePerProcess(initializer::Type{U}) where U = OncePerProcess{Base.promote_op(initializer), Type{U}}(initializer)
OncePerProcess(initializer) = OncePerProcess{Base.promote_op(initializer), typeof(initializer)}(initializer)
-@inline function (once::OncePerProcess{T})() where T
+@inline function (once::OncePerProcess{T,F})() where {T,F}
state = (@atomic :acquire once.state)
if state != PerStateHasrun
- (@noinline function init_perprocesss(once, state)
+ (@noinline function init_perprocesss(once::OncePerProcess{T,F}, state::UInt8) where {T,F}
state == PerStateErrored && error("OncePerProcess initializer failed previously")
once.allow_compile_time || __precompile__(false)
lock(once.lock)
try
state = @atomic :monotonic once.state
if state == PerStateInitial
+ ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any),
+ once, :value, nothing)
+ ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any),
+ once, :state, PerStateInitial)
once.value = once.initializer()
elseif state == PerStateErrored
error("OncePerProcess initializer failed previously")
@@ -801,7 +803,7 @@ julia> threadvec === thread_state[Threads.threadid()]
true
```
"""
-mutable struct OncePerThread{T, F}
+mutable struct OncePerThread{T, F} <: Function
@atomic xs::AtomicMemory{T} # values
@atomic ss::AtomicMemory{UInt8} # states: 0=initial, 1=hasrun, 2=error, 3==concurrent
const initializer::F
@@ -809,23 +811,21 @@ mutable struct OncePerThread{T, F}
function OncePerThread{T,F}(initializer::F) where {T, F}
xs, ss = AtomicMemory{T}(), AtomicMemory{UInt8}()
once = new{T,F}(xs, ss, initializer)
- ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any),
- once, :xs, xs)
- ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any),
- once, :ss, ss)
return once
end
end
+OncePerThread{T}(initializer::Type{U}) where {T, U} = OncePerThread{T,Type{U}}(initializer)
OncePerThread{T}(initializer::F) where {T, F} = OncePerThread{T,F}(initializer)
+OncePerThread(initializer::Type{U}) where U = OncePerThread{Base.promote_op(initializer), Type{U}}(initializer)
OncePerThread(initializer) = OncePerThread{Base.promote_op(initializer), typeof(initializer)}(initializer)
-@inline (once::OncePerThread)() = once[Threads.threadid()]
-@inline function getindex(once::OncePerThread, tid::Integer)
+@inline (once::OncePerThread{T,F})() where {T,F} = once[Threads.threadid()]
+@inline function getindex(once::OncePerThread{T,F}, tid::Integer) where {T,F}
tid = Int(tid)
ss = @atomic :acquire once.ss
xs = @atomic :monotonic once.xs
# n.b. length(xs) >= length(ss)
if tid <= 0 || tid > length(ss) || (@atomic :acquire ss[tid]) != PerStateHasrun
- (@noinline function init_perthread(once, tid)
+ (@noinline function init_perthread(once::OncePerThread{T,F}, tid::Int) where {T,F}
local ss = @atomic :acquire once.ss
local xs = @atomic :monotonic once.xs
local len = length(ss)
@@ -849,6 +849,12 @@ OncePerThread(initializer) = OncePerThread{Base.promote_op(initializer), typeof(
ss = @atomic :monotonic once.ss
xs = @atomic :monotonic once.xs
if tid > length(ss)
+ if length(ss) == 0 # We are the first to initialize
+ ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any),
+ once, :xs, xs)
+ ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any),
+ once, :ss, ss)
+ end
@assert len <= length(ss) <= length(newss) "logical constraint violation"
fill_monotonic!(newss, PerStateInitial)
xs = copyto_monotonic!(newxs, xs)
@@ -926,11 +932,15 @@ Making lazy task value...done.
false
```
"""
-mutable struct OncePerTask{T, F}
+mutable struct OncePerTask{T, F} <: Function
const initializer::F
+ OncePerTask{T}(initializer::Type{U}) where {T, U} = new{T,Type{U}}(initializer)
OncePerTask{T}(initializer::F) where {T, F} = new{T,F}(initializer)
OncePerTask{T,F}(initializer::F) where {T, F} = new{T,F}(initializer)
+ OncePerTask(initializer::Type{U}) where U = new{Base.promote_op(initializer), Type{U}}(initializer)
OncePerTask(initializer) = new{Base.promote_op(initializer), typeof(initializer)}(initializer)
end
-@inline (once::OncePerTask)() = get!(once.initializer, task_local_storage(), once)
+@inline function (once::OncePerTask{T,F})() where {T,F}
+ get!(once.initializer, task_local_storage(), once)::T
+end
diff --git a/base/logging/ConsoleLogger.jl b/base/logging/ConsoleLogger.jl
index 818b2272b773c..8766d0ae56331 100644
--- a/base/logging/ConsoleLogger.jl
+++ b/base/logging/ConsoleLogger.jl
@@ -9,6 +9,9 @@ interactive work with the Julia REPL.
Log levels less than `min_level` are filtered out.
+This Logger is thread-safe, with locks around both the orchestration of message
+limits (i.e. `maxlog`) and writes to the stream.
+
Message formatting can be controlled by setting keyword arguments:
* `meta_formatter` is a function which takes the log event metadata
@@ -24,6 +27,7 @@ Message formatting can be controlled by setting keyword arguments:
"""
struct ConsoleLogger <: AbstractLogger
stream::IO
+ lock::ReentrantLock # do not log within this lock
min_level::LogLevel
meta_formatter
show_limited::Bool
@@ -33,19 +37,19 @@ end
function ConsoleLogger(stream::IO, min_level=Info;
meta_formatter=default_metafmt, show_limited=true,
right_justify=0)
- ConsoleLogger(stream, min_level, meta_formatter,
+ ConsoleLogger(stream, ReentrantLock(), min_level, meta_formatter,
show_limited, right_justify, Dict{Any,Int}())
end
function ConsoleLogger(min_level=Info;
meta_formatter=default_metafmt, show_limited=true,
right_justify=0)
- ConsoleLogger(closed_stream, min_level, meta_formatter,
+ ConsoleLogger(closed_stream, ReentrantLock(), min_level, meta_formatter,
show_limited, right_justify, Dict{Any,Int}())
end
shouldlog(logger::ConsoleLogger, level, _module, group, id) =
- get(logger.message_limits, id, 1) > 0
+ @lock logger.lock get(logger.message_limits, id, 1) > 0
min_enabled_level(logger::ConsoleLogger) = logger.min_level
@@ -109,9 +113,11 @@ function handle_message(logger::ConsoleLogger, level::LogLevel, message, _module
hasmaxlog = haskey(kwargs, :maxlog) ? 1 : 0
maxlog = get(kwargs, :maxlog, nothing)
if maxlog isa Core.BuiltinInts
- remaining = get!(logger.message_limits, id, Int(maxlog)::Int)
- logger.message_limits[id] = remaining - 1
- remaining > 0 || return
+ @lock logger.lock begin
+ remaining = get!(logger.message_limits, id, Int(maxlog)::Int)
+ remaining == 0 && return
+ logger.message_limits[id] = remaining - 1
+ end
end
# Generate a text representation of the message and all key value pairs,
@@ -184,6 +190,7 @@ function handle_message(logger::ConsoleLogger, level::LogLevel, message, _module
println(iob)
end
- write(stream, take!(buf))
+ b = take!(buf)
+ @lock logger.lock write(stream, b)
nothing
end
diff --git a/base/logging/logging.jl b/base/logging/logging.jl
index a1a8417bcb388..c4a3d21fed982 100644
--- a/base/logging/logging.jl
+++ b/base/logging/logging.jl
@@ -132,6 +132,7 @@ isless(a::LogLevel, b::LogLevel) = isless(a.level, b.level)
+(level::LogLevel, inc::Integer) = LogLevel(level.level+inc)
-(level::LogLevel, inc::Integer) = LogLevel(level.level-inc)
convert(::Type{LogLevel}, level::Integer) = LogLevel(level)
+convert(::Type{Int32}, level::LogLevel) = level.level
"""
BelowMinLevel
@@ -171,7 +172,8 @@ Alias for [`LogLevel(1_000_001)`](@ref LogLevel).
const AboveMaxLevel = LogLevel( 1000001)
# Global log limiting mechanism for super fast but inflexible global log limiting.
-const _min_enabled_level = Ref{LogLevel}(Debug)
+# Atomic ensures that the value is always consistent across threads.
+const _min_enabled_level = Threads.Atomic{Int32}(Debug)
function show(io::IO, level::LogLevel)
if level == BelowMinLevel print(io, "BelowMinLevel")
@@ -394,7 +396,7 @@ function logmsg_code(_module, file, line, level, message, exs...)
level = $level
# simplify std_level code emitted, if we know it is one of our global constants
std_level = $(level isa Symbol ? :level : :(level isa $LogLevel ? level : convert($LogLevel, level)::$LogLevel))
- if std_level >= $(_min_enabled_level)[]
+ if std_level.level >= $(_min_enabled_level)[]
group = $(log_data._group)
_module = $(log_data._module)
logger = $(current_logger_for_env)(std_level, group, _module)
@@ -541,7 +543,8 @@ with_logstate(f::Function, logstate) = @with(CURRENT_LOGSTATE => logstate, f())
Disable all log messages at log levels equal to or less than `level`. This is
a *global* setting, intended to make debug logging extremely cheap when
-disabled.
+disabled. Note that this cannot be used to enable logging that is currently disabled
+by other mechanisms.
# Examples
```julia
@@ -663,17 +666,21 @@ close(closed_stream)
Simplistic logger for logging all messages with level greater than or equal to
`min_level` to `stream`. If stream is closed then messages with log level
greater or equal to `Warn` will be logged to `stderr` and below to `stdout`.
+
+This Logger is thread-safe, with a lock taken around both the orchestration of
+message limits (i.e. `maxlog`) and writes to the stream.
"""
struct SimpleLogger <: AbstractLogger
stream::IO
+ lock::ReentrantLock
min_level::LogLevel
message_limits::Dict{Any,Int}
end
-SimpleLogger(stream::IO, level=Info) = SimpleLogger(stream, level, Dict{Any,Int}())
+SimpleLogger(stream::IO, level=Info) = SimpleLogger(stream, ReentrantLock(), level, Dict{Any,Int}())
SimpleLogger(level=Info) = SimpleLogger(closed_stream, level)
shouldlog(logger::SimpleLogger, level, _module, group, id) =
- get(logger.message_limits, id, 1) > 0
+ @lock logger.lock get(logger.message_limits, id, 1) > 0
min_enabled_level(logger::SimpleLogger) = logger.min_level
@@ -684,15 +691,14 @@ function handle_message(logger::SimpleLogger, level::LogLevel, message, _module,
@nospecialize
maxlog = get(kwargs, :maxlog, nothing)
if maxlog isa Core.BuiltinInts
- remaining = get!(logger.message_limits, id, Int(maxlog)::Int)
- logger.message_limits[id] = remaining - 1
- remaining > 0 || return
+ @lock logger.lock begin
+ remaining = get!(logger.message_limits, id, Int(maxlog)::Int)
+ remaining == 0 && return
+ logger.message_limits[id] = remaining - 1
+ end
end
buf = IOBuffer()
stream::IO = logger.stream
- if !(isopen(stream)::Bool)
- stream = stderr
- end
iob = IOContext(buf, stream)
levelstr = level == Warn ? "Warning" : string(level)
msglines = eachsplit(chomp(convert(String, string(message))::String), '\n')
@@ -706,7 +712,13 @@ function handle_message(logger::SimpleLogger, level::LogLevel, message, _module,
println(iob, "│ ", key, " = ", val)
end
println(iob, "└ @ ", _module, " ", filepath, ":", line)
- write(stream, take!(buf))
+ b = take!(buf)
+ @lock logger.lock begin
+ if !(isopen(stream)::Bool)
+ stream = stderr
+ end
+ write(stream, b)
+ end
nothing
end
diff --git a/base/math.jl b/base/math.jl
index 650fc6bc0cef0..beef8de09bd8b 100644
--- a/base/math.jl
+++ b/base/math.jl
@@ -1217,7 +1217,8 @@ end
# this method is only reliable for -2^20 < n < 2^20 (cf. #53881 #53886)
@assume_effects :terminates_locally @noinline function pow_body(x::Float64, n::Integer)
y = 1.0
- xnlo = ynlo = 0.0
+ xnlo = -0.0
+ ynlo = 0.0
n == 3 && return x*x*x # keep compatibility with literal_pow
if n < 0
rx = inv(x)
@@ -1541,7 +1542,7 @@ for f in (:sin, :cos, :tan, :asin, :atan, :acos,
:exponent, :sqrt, :cbrt, :sinpi, :cospi, :sincospi, :tanpi)
@eval function ($f)(x::Real)
xf = float(x)
- x === xf && throw(MethodError($f, (x,)))
+ xf isa typeof(x) && throw(MethodError($f, (x,)))
return ($f)(xf)
end
@eval $(f)(::Missing) = missing
diff --git a/base/meta.jl b/base/meta.jl
index 36875b8e2c625..4807b910c494a 100644
--- a/base/meta.jl
+++ b/base/meta.jl
@@ -427,7 +427,7 @@ function _partially_inline!(@nospecialize(x), slot_replacements::Vector{Any},
elseif i == 4
@assert isa(x.args[4], Int)
elseif i == 5
- @assert isa((x.args[5]::QuoteNode).value, Union{Symbol, Tuple{Symbol, UInt8}})
+ @assert isa((x.args[5]::QuoteNode).value, Union{Symbol, Tuple{Symbol, UInt16, Bool}})
else
x.args[i] = _partially_inline!(x.args[i], slot_replacements,
type_signature, static_param_values,
diff --git a/base/methodshow.jl b/base/methodshow.jl
index a2158cb9180e4..7fdefc9b7311f 100644
--- a/base/methodshow.jl
+++ b/base/methodshow.jl
@@ -131,13 +131,17 @@ function fixup_stdlib_path(path::String)
# The file defining Base.Sys gets included after this file is included so make sure
# this function is valid even in this intermediary state
if isdefined(@__MODULE__, :Sys)
- BUILD_STDLIB_PATH = Sys.BUILD_STDLIB_PATH::String
- STDLIB = Sys.STDLIB::String
- if BUILD_STDLIB_PATH != STDLIB
+ if Sys.BUILD_STDLIB_PATH != Sys.STDLIB
# BUILD_STDLIB_PATH gets defined in sysinfo.jl
npath = normpath(path)
- npath′ = replace(npath, normpath(BUILD_STDLIB_PATH) => normpath(STDLIB))
- return npath == npath′ ? path : npath′
+ npath′ = replace(npath, normpath(Sys.BUILD_STDLIB_PATH) => normpath(Sys.STDLIB))
+ path = npath == npath′ ? path : npath′
+ end
+ if isdefined(@__MODULE__, :Core) && isdefined(Core, :Compiler)
+ compiler_folder = dirname(String(Base.moduleloc(Core.Compiler).file))
+ if dirname(path) == compiler_folder
+ return abspath(Sys.STDLIB, "..", "..", "Compiler", "src", basename(path))
+ end
end
end
return path
diff --git a/base/mpfr.jl b/base/mpfr.jl
index 1e39f52b9d1a3..0d52510447b2f 100644
--- a/base/mpfr.jl
+++ b/base/mpfr.jl
@@ -20,12 +20,15 @@ import
sinpi, cospi, sincospi, tanpi, sind, cosd, tand, asind, acosd, atand,
uinttype, exponent_max, exponent_min, ieee754_representation, significand_mask
+import .Core: AbstractFloat
+import .Base: Rational, Float16, Float32, Float64, Bool
+
using .Base.Libc
import ..Rounding: Rounding,
rounding_raw, setrounding_raw, rounds_to_nearest, rounds_away_from_zero,
tie_breaker_is_to_even, correct_rounding_requires_increment
-import ..GMP: ClongMax, CulongMax, CdoubleMax, Limb, libgmp
+import ..GMP: ClongMax, CulongMax, CdoubleMax, Limb, libgmp, BigInt
import ..FastMath.sincos_fast
@@ -211,6 +214,8 @@ end
Base.unsafe_convert(::Type{Ref{BigFloat}}, x::Ptr{BigFloat}) = error("not compatible with mpfr")
Base.unsafe_convert(::Type{Ref{BigFloat}}, x::Ref{BigFloat}) = error("not compatible with mpfr")
Base.cconvert(::Type{Ref{BigFloat}}, x::BigFloat) = x.d # BigFloatData is the Ref type for BigFloat
+Base.cconvert(::Type{Ref{BigFloat}}, x::Number) = convert(BigFloat, x).d # avoid default conversion to Ref(BigFloat(x))
+Base.cconvert(::Type{Ref{BigFloat}}, x::Ref{BigFloat}) = x[].d
function Base.unsafe_convert(::Type{Ref{BigFloat}}, x::BigFloatData)
d = getfield(x, :d)
p = Base.unsafe_convert(Ptr{Limb}, d)
diff --git a/base/multidimensional.jl b/base/multidimensional.jl
index ba08f0679590b..40fff7243cd55 100644
--- a/base/multidimensional.jl
+++ b/base/multidimensional.jl
@@ -4,10 +4,12 @@
module IteratorsMD
import .Base: eltype, length, size, first, last, in, getindex, setindex!,
min, max, zero, oneunit, isless, eachindex,
- convert, show, iterate, promote_rule, to_indices, copy
+ convert, show, iterate, promote_rule, to_indices, copy,
+ isassigned
import .Base: +, -, *, (:)
import .Base: simd_outer_range, simd_inner_length, simd_index, setindex
+ import Core: Tuple
using .Base: to_index, fill_to_length, tail, safe_tail
using .Base: IndexLinear, IndexCartesian, AbstractCartesianIndex,
ReshapedArray, ReshapedArrayLF, OneTo, Fix1
diff --git a/base/namedtuple.jl b/base/namedtuple.jl
index 991c4d35da52f..f71b13852b953 100644
--- a/base/namedtuple.jl
+++ b/base/namedtuple.jl
@@ -1,5 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
+import Core: NamedTuple
+
"""
NamedTuple
diff --git a/base/options.jl b/base/options.jl
index 3281ec0de98d2..1373c06c0c53f 100644
--- a/base/options.jl
+++ b/base/options.jl
@@ -17,6 +17,7 @@ struct JLOptions
nprocs::Int32
machine_file::Ptr{UInt8}
project::Ptr{UInt8}
+ program_file::Ptr{UInt8}
isinteractive::Int8
color::Int8
historyfile::Int8
@@ -63,6 +64,7 @@ struct JLOptions
trim::Int8
task_metrics::Int8
timeout_for_safepoint_straggler_s::Int16
+ safe_crash_log_file::Ptr{UInt8}
end
# This runs early in the sysimage != is not defined yet
diff --git a/base/partr.jl b/base/partr.jl
index 6053a584af5ba..a759e9fa5405c 100644
--- a/base/partr.jl
+++ b/base/partr.jl
@@ -97,7 +97,7 @@ function multiq_sift_down(heap::taskheap, idx::Int32)
child = Int(child)
child > length(heap.tasks) && break
if isassigned(heap.tasks, child) &&
- heap.tasks[child].priority < heap.tasks[idx].priority
+ heap.tasks[child].priority <= heap.tasks[idx].priority
t = heap.tasks[idx]
heap.tasks[idx] = heap.tasks[child]
heap.tasks[child] = t
diff --git a/base/pcre.jl b/base/pcre.jl
index e4567fe03e8f8..213fc1890f51d 100644
--- a/base/pcre.jl
+++ b/base/pcre.jl
@@ -199,7 +199,7 @@ end
exec(re, subject::Union{String,SubString{String}}, offset, options, match_data) =
_exec(re, subject, offset, options, match_data)
exec(re, subject, offset, options, match_data) =
- _exec(re, String(subject), offset, options, match_data)
+ _exec(re, String(subject)::String, offset, options, match_data)
function _exec(re, subject, offset, options, match_data)
rc = ccall((:pcre2_match_8, PCRE_LIB), Cint,
diff --git a/base/pointer.jl b/base/pointer.jl
index de2f413d8f881..97b9bf0e01732 100644
--- a/base/pointer.jl
+++ b/base/pointer.jl
@@ -59,8 +59,6 @@ cconvert(::Type{Ptr{UInt8}}, s::AbstractString) = String(s)
cconvert(::Type{Ptr{Int8}}, s::AbstractString) = String(s)
unsafe_convert(::Type{Ptr{UInt8}}, x::Symbol) = ccall(:jl_symbol_name, Ptr{UInt8}, (Any,), x)
unsafe_convert(::Type{Ptr{Int8}}, x::Symbol) = ccall(:jl_symbol_name, Ptr{Int8}, (Any,), x)
-unsafe_convert(::Type{Ptr{UInt8}}, s::String) = ccall(:jl_string_ptr, Ptr{UInt8}, (Any,), s)
-unsafe_convert(::Type{Ptr{Int8}}, s::String) = ccall(:jl_string_ptr, Ptr{Int8}, (Any,), s)
cconvert(::Type{<:Ptr}, a::Array) = getfield(a, :ref)
unsafe_convert(::Type{Ptr{S}}, a::AbstractArray{T}) where {S,T} = convert(Ptr{S}, unsafe_convert(Ptr{T}, a))
diff --git a/base/precompilation.jl b/base/precompilation.jl
index 820cf260df71f..c24026aa2a8ef 100644
--- a/base/precompilation.jl
+++ b/base/precompilation.jl
@@ -143,15 +143,16 @@ function ExplicitEnv(envpath::String=Base.active_project())
# Extensions
deps_pkg = get(Dict{String, Any}, pkg_info, "extensions")::Dict{String, Any}
+ deps_pkg_concrete = Dict{String, Vector{String}}()
for (ext, triggers) in deps_pkg
if triggers isa String
triggers = [triggers]
else
triggers = triggers::Vector{String}
end
- deps_pkg[ext] = triggers
+ deps_pkg_concrete[ext] = triggers
end
- extensions[m_uuid] = deps_pkg
+ extensions[m_uuid] = deps_pkg_concrete
# Determine strategy to find package
lookup_strat = begin
@@ -300,7 +301,7 @@ function show_progress(io::IO, p::MiniProgressBar; termwidth=nothing, carriagere
else
string(p.current, "/", p.max)
end
- termwidth = @something termwidth displaysize(io)[2]
+ termwidth = @something termwidth (displaysize(io)::Tuple{Int,Int})[2]
max_progress_width = max(0, min(termwidth - textwidth(p.header) - textwidth(progress_text) - 10 , p.width))
n_filled = floor(Int, max_progress_width * perc / 100)
partial_filled = (max_progress_width * perc / 100) - n_filled
@@ -408,6 +409,53 @@ function excluded_circular_deps_explanation(io::IOContext{IO}, ext_to_parent::Di
return msg
end
+
+function scan_pkg!(stack, could_be_cycle, cycles, pkg, dmap)
+ if haskey(could_be_cycle, pkg)
+ return could_be_cycle[pkg]
+ else
+ return scan_deps!(stack, could_be_cycle, cycles, pkg, dmap)
+ end
+end
+function scan_deps!(stack, could_be_cycle, cycles, pkg, dmap)
+ push!(stack, pkg)
+ cycle = nothing
+ for dep in dmap[pkg]
+ if dep in stack
+ # Created fresh cycle
+ cycle′ = stack[findlast(==(dep), stack):end]
+ if cycle === nothing || length(cycle′) < length(cycle)
+ cycle = cycle′ # try to report smallest cycle possible
+ end
+ elseif scan_pkg!(stack, could_be_cycle, cycles, dep, dmap)
+ # Reaches an existing cycle
+ could_be_cycle[pkg] = true
+ pop!(stack)
+ return true
+ end
+ end
+ pop!(stack)
+ if cycle !== nothing
+ push!(cycles, cycle)
+ could_be_cycle[pkg] = true
+ return true
+ end
+ could_be_cycle[pkg] = false
+ return false
+end
+
+# restrict to dependencies of given packages
+function collect_all_deps(direct_deps, dep, alldeps=Set{Base.PkgId}())
+ for _dep in direct_deps[dep]
+ if !(_dep in alldeps)
+ push!(alldeps, _dep)
+ collect_all_deps(direct_deps, _dep, alldeps)
+ end
+ end
+ return alldeps
+end
+
+
function precompilepkgs(pkgs::Vector{String}=String[];
internal_call::Bool=false,
strict::Bool = false,
@@ -433,7 +481,7 @@ function _precompilepkgs(pkgs::Vector{String},
timing::Bool,
_from_loading::Bool,
configs::Vector{Config},
- io::IOContext{IO},
+ _io::IOContext{IO},
fancyprint::Bool,
manifest::Bool,
ignore_loaded::Bool)
@@ -451,10 +499,9 @@ function _precompilepkgs(pkgs::Vector{String},
num_tasks = parse(Int, get(ENV, "JULIA_NUM_PRECOMPILE_TASKS", string(default_num_tasks)))
parallel_limiter = Base.Semaphore(num_tasks)
- if _from_loading && !Sys.isinteractive() && Base.get_bool_env("JULIA_TESTS", false)
- # suppress passive loading printing in julia test suite. `JULIA_TESTS` is set in Base.runtests
- io = IOContext{IO}(devnull)
- end
+ # suppress passive loading printing in julia test suite. `JULIA_TESTS` is set in Base.runtests
+ io = (_from_loading && !Sys.isinteractive() && Base.get_bool_env("JULIA_TESTS", false)) ? IOContext{IO}(devnull) : _io
+
nconfigs = length(configs)
hascolor = get(io, :color, false)::Bool
@@ -551,7 +598,7 @@ function _precompilepkgs(pkgs::Vector{String},
end
end
- indirect_deps = Dict{Base.PkgId, Set{Base.PkgId}}()
+ local indirect_deps = Dict{Base.PkgId, Set{Base.PkgId}}()
for package in keys(direct_deps)
# Initialize a set to keep track of all dependencies for 'package'
all_deps = Set{Base.PkgId}()
@@ -618,46 +665,14 @@ function _precompilepkgs(pkgs::Vector{String},
could_be_cycle = Dict{Base.PkgId, Bool}()
# temporary stack for the SCC-like algorithm below
stack = Base.PkgId[]
- function scan_pkg!(pkg, dmap)
- if haskey(could_be_cycle, pkg)
- return could_be_cycle[pkg]
- else
- return scan_deps!(pkg, dmap)
- end
- end
- function scan_deps!(pkg, dmap)
- push!(stack, pkg)
- cycle = nothing
- for dep in dmap[pkg]
- if dep in stack
- # Created fresh cycle
- cycle′ = stack[findlast(==(dep), stack):end]
- if cycle === nothing || length(cycle′) < length(cycle)
- cycle = cycle′ # try to report smallest cycle possible
- end
- elseif scan_pkg!(dep, dmap)
- # Reaches an existing cycle
- could_be_cycle[pkg] = true
- pop!(stack)
- return true
- end
- end
- pop!(stack)
- if cycle !== nothing
- push!(cycles, cycle)
- could_be_cycle[pkg] = true
- return true
- end
- could_be_cycle[pkg] = false
- return false
- end
+
# set of packages that depend on a cycle (either because they are
# a part of a cycle themselves or because they transitively depend
# on a package in some cycle)
circular_deps = Base.PkgId[]
for pkg in keys(direct_deps)
@assert isempty(stack)
- if scan_pkg!(pkg, direct_deps)
+ if scan_pkg!(stack, could_be_cycle, cycles, pkg, direct_deps)
push!(circular_deps, pkg)
for pkg_config in keys(was_processed)
# notify all to allow skipping
@@ -674,16 +689,6 @@ function _precompilepkgs(pkgs::Vector{String},
if isempty(pkgs)
pkgs = [pkg.name for pkg in project_deps]
end
- # restrict to dependencies of given packages
- function collect_all_deps(direct_deps, dep, alldeps=Set{Base.PkgId}())
- for _dep in direct_deps[dep]
- if !(_dep in alldeps)
- push!(alldeps, _dep)
- collect_all_deps(direct_deps, _dep, alldeps)
- end
- end
- return alldeps
- end
keep = Set{Base.PkgId}()
for dep in direct_deps
dep_pkgid = first(dep)
@@ -710,13 +715,13 @@ function _precompilepkgs(pkgs::Vector{String},
end
end
- target = nothing
+ target = Ref{Union{Nothing, String}}(nothing)
if nconfigs == 1
if !isempty(only(configs)[1])
- target = "for configuration $(join(only(configs)[1], " "))"
+ target[] = "for configuration $(join(only(configs)[1], " "))"
end
else
- target = "for $nconfigs compilation configurations..."
+ target[] = "for $nconfigs compilation configurations..."
end
@debug "precompile: packages filtered"
@@ -726,7 +731,7 @@ function _precompilepkgs(pkgs::Vector{String},
print_lock = io.io isa Base.LibuvStream ? io.io.lock::ReentrantLock : ReentrantLock()
first_started = Base.Event()
- printloop_should_exit::Bool = !fancyprint # exit print loop immediately if not fancy printing
+ printloop_should_exit = Ref{Bool}(!fancyprint) # exit print loop immediately if not fancy printing
interrupted_or_done = Base.Event()
ansi_moveup(n::Int) = string("\e[", n, "A")
@@ -735,19 +740,19 @@ function _precompilepkgs(pkgs::Vector{String},
ansi_cleartoendofline = "\e[0K"
ansi_enablecursor = "\e[?25h"
ansi_disablecursor = "\e[?25l"
- n_done::Int = 0
- n_already_precomp::Int = 0
- n_loaded::Int = 0
- interrupted = false
+ n_done = Ref(0)
+ n_already_precomp = Ref(0)
+ n_loaded = Ref(0)
+ interrupted = Ref(false)
- function handle_interrupt(err, in_printloop = false)
+ function handle_interrupt(err, in_printloop::Bool)
notify(interrupted_or_done)
in_printloop || wait(t_print) # wait to let the print loop cease first
if err isa InterruptException
- lock(print_lock) do
+ @lock print_lock begin
println(io, " Interrupted: Exiting precompilation...", ansi_cleartoendofline)
end
- interrupted = true
+ interrupted[] = true
return true
else
return false
@@ -756,34 +761,34 @@ function _precompilepkgs(pkgs::Vector{String},
std_outputs = Dict{PkgConfig,IOBuffer}()
taskwaiting = Set{PkgConfig}()
pkgspidlocked = Dict{PkgConfig,String}()
- pkg_liveprinted = nothing
+ pkg_liveprinted = Ref{Union{Nothing, PkgId}}(nothing)
function monitor_std(pkg_config, pipe; single_requested_pkg=false)
pkg, config = pkg_config
try
liveprinting = false
while !eof(pipe)
- str = readline(pipe, keep=true)
+ local str = readline(pipe, keep=true)
if single_requested_pkg && (liveprinting || !isempty(str))
- lock(print_lock) do
+ @lock print_lock begin
if !liveprinting
printpkgstyle(io, :Info, "Given $(pkg.name) was explicitly requested, output will be shown live $ansi_cleartoendofline",
color = Base.info_color())
liveprinting = true
- pkg_liveprinted = pkg
+ pkg_liveprinted[] = pkg
end
print(io, ansi_cleartoendofline, str)
end
end
write(get!(IOBuffer, std_outputs, pkg_config), str)
if !in(pkg_config, taskwaiting) && occursin("waiting for IO to finish", str)
- !fancyprint && lock(print_lock) do
+ !fancyprint && @lock print_lock begin
println(io, pkg.name, color_string(" Waiting for background task / IO / timer.", Base.warn_color()))
end
push!(taskwaiting, pkg_config)
end
if !fancyprint && in(pkg_config, taskwaiting)
- lock(print_lock) do
+ @lock print_lock begin
print(io, str)
end
end
@@ -798,9 +803,9 @@ function _precompilepkgs(pkgs::Vector{String},
try
wait(first_started)
(isempty(pkg_queue) || interrupted_or_done.set) && return
- lock(print_lock) do
- if target !== nothing
- printpkgstyle(io, :Precompiling, target)
+ @lock print_lock begin
+ if target[] !== nothing
+ printpkgstyle(io, :Precompiling, target[])
end
if fancyprint
print(io, ansi_disablecursor)
@@ -812,12 +817,12 @@ function _precompilepkgs(pkgs::Vector{String},
last_length = 0
bar = MiniProgressBar(; indent=0, header = "Precompiling packages ", color = :green, percentage=false, always_reprint=true)
n_total = length(direct_deps) * length(configs)
- bar.max = n_total - n_already_precomp
+ bar.max = n_total - n_already_precomp[]
final_loop = false
n_print_rows = 0
- while !printloop_should_exit
- lock(print_lock) do
- term_size = displaysize(io)
+ while !printloop_should_exit[]
+ @lock print_lock begin
+ term_size = displaysize(io)::Tuple{Int, Int}
num_deps_show = max(term_size[1] - 3, 2) # show at least 2 deps
pkg_queue_show = if !interrupted_or_done.set && length(pkg_queue) > num_deps_show
last(pkg_queue, num_deps_show)
@@ -828,14 +833,14 @@ function _precompilepkgs(pkgs::Vector{String},
if i > 1
print(iostr, ansi_cleartoend)
end
- bar.current = n_done - n_already_precomp
- bar.max = n_total - n_already_precomp
+ bar.current = n_done[] - n_already_precomp[]
+ bar.max = n_total - n_already_precomp[]
# when sizing to the terminal width subtract a little to give some tolerance to resizing the
# window between print cycles
- termwidth = displaysize(io)[2] - 4
+ termwidth = (displaysize(io)::Tuple{Int,Int})[2] - 4
if !final_loop
- str = sprint(io -> show_progress(io, bar; termwidth, carriagereturn=false); context=io)
- print(iostr, Base._truncate_at_width_or_chars(true, str, termwidth), "\n")
+ s = sprint(io -> show_progress(io, bar; termwidth, carriagereturn=false); context=io)
+ print(iostr, Base._truncate_at_width_or_chars(true, s, termwidth), "\n")
end
for pkg_config in pkg_queue_show
dep, config = pkg_config
@@ -876,10 +881,10 @@ function _precompilepkgs(pkgs::Vector{String},
last_length = length(pkg_queue_show)
n_print_rows = count("\n", str_)
print(io, str_)
- printloop_should_exit = interrupted_or_done.set && final_loop
+ printloop_should_exit[] = interrupted_or_done.set && final_loop
final_loop = interrupted_or_done.set # ensures one more loop to tidy last task after finish
i += 1
- printloop_should_exit || print(io, ansi_moveup(n_print_rows), ansi_movecol1)
+ printloop_should_exit[] || print(io, ansi_moveup(n_print_rows), ansi_movecol1)
end
wait(t)
end
@@ -930,9 +935,9 @@ function _precompilepkgs(pkgs::Vector{String},
t_monitor = @async monitor_std(pkg_config, std_pipe; single_requested_pkg)
name = describe_pkg(pkg, is_project_dep, flags, cacheflags)
- lock(print_lock) do
+ @lock print_lock begin
if !fancyprint && isempty(pkg_queue)
- printpkgstyle(io, :Precompiling, something(target, "packages..."))
+ printpkgstyle(io, :Precompiling, something(target[], "packages..."))
end
end
push!(pkg_queue, pkg_config)
@@ -959,16 +964,16 @@ function _precompilepkgs(pkgs::Vector{String},
end
if ret isa Base.PrecompilableError
push!(precomperr_deps, pkg_config)
- !fancyprint && lock(print_lock) do
+ !fancyprint && @lock print_lock begin
println(io, _timing_string(t), color_string(" ? ", Base.warn_color()), name)
end
else
- !fancyprint && lock(print_lock) do
+ !fancyprint && @lock print_lock begin
println(io, _timing_string(t), color_string(" ✓ ", loaded ? Base.warn_color() : :green), name)
end
was_recompiled[pkg_config] = true
end
- loaded && (n_loaded += 1)
+ loaded && (n_loaded[] += 1)
catch err
# @show err
close(std_pipe.in) # close pipe to end the std output monitor
@@ -977,7 +982,7 @@ function _precompilepkgs(pkgs::Vector{String},
errmsg = String(take!(get(IOBuffer, std_outputs, pkg_config)))
delete!(std_outputs, pkg_config) # so it's not shown as warnings, given error report
failed_deps[pkg_config] = (strict || is_project_dep) ? string(sprint(showerror, err), "\n", strip(errmsg)) : ""
- !fancyprint && lock(print_lock) do
+ !fancyprint && @lock print_lock begin
println(io, " "^9, color_string(" ✗ ", Base.error_color()), name)
end
else
@@ -989,15 +994,15 @@ function _precompilepkgs(pkgs::Vector{String},
Base.release(parallel_limiter)
end
else
- is_stale || (n_already_precomp += 1)
+ is_stale || (n_already_precomp[] += 1)
end
- n_done += 1
+ n_done[] += 1
notify(was_processed[pkg_config])
catch err_outer
# For debugging:
# println("Task failed $err_outer")
# Base.display_error(ErrorException(""), Base.catch_backtrace())# logging doesn't show here
- handle_interrupt(err_outer) || rethrow()
+ handle_interrupt(err_outer, false) || rethrow()
notify(was_processed[pkg_config])
finally
filter!(!istaskdone, tasks)
@@ -1012,13 +1017,13 @@ function _precompilepkgs(pkgs::Vector{String},
try
wait(interrupted_or_done)
catch err
- handle_interrupt(err) || rethrow()
+ handle_interrupt(err, false) || rethrow()
finally
Base.LOADING_CACHE[] = nothing
end
notify(first_started) # in cases of no-op or !fancyprint
fancyprint && wait(t_print)
- quick_exit = !all(istaskdone, tasks) || interrupted # if some not finished internal error is likely
+ quick_exit = !all(istaskdone, tasks) || interrupted[] # if some not finished internal error is likely
seconds_elapsed = round(Int, (time_ns() - time_start) / 1e9)
ndeps = count(values(was_recompiled))
if ndeps > 0 || !isempty(failed_deps) || (quick_exit && !isempty(std_outputs))
@@ -1030,18 +1035,18 @@ function _precompilepkgs(pkgs::Vector{String},
end
plural = length(configs) > 1 ? "dependency configurations" : ndeps == 1 ? "dependency" : "dependencies"
print(iostr, " $(ndeps) $(plural) successfully precompiled in $(seconds_elapsed) seconds")
- if n_already_precomp > 0 || !isempty(circular_deps)
- n_already_precomp > 0 && (print(iostr, ". $n_already_precomp already precompiled"))
+ if n_already_precomp[] > 0 || !isempty(circular_deps)
+ n_already_precomp[] > 0 && (print(iostr, ". $(n_already_precomp[]) already precompiled"))
!isempty(circular_deps) && (print(iostr, ". $(length(circular_deps)) skipped due to circular dependency"))
print(iostr, ".")
end
- if n_loaded > 0
- plural1 = length(configs) > 1 ? "dependency configurations" : n_loaded == 1 ? "dependency" : "dependencies"
- plural2 = n_loaded == 1 ? "a different version is" : "different versions are"
- plural3 = n_loaded == 1 ? "" : "s"
- plural4 = n_loaded == 1 ? "this package" : "these packages"
+ if n_loaded[] > 0
+ local plural1 = length(configs) > 1 ? "dependency configurations" : n_loaded[] == 1 ? "dependency" : "dependencies"
+ local plural2 = n_loaded[] == 1 ? "a different version is" : "different versions are"
+ local plural3 = n_loaded[] == 1 ? "" : "s"
+ local plural4 = n_loaded[] == 1 ? "this package" : "these packages"
print(iostr, "\n ",
- color_string(string(n_loaded), Base.warn_color()),
+ color_string(string(n_loaded[]), Base.warn_color()),
" $(plural1) precompiled but ",
color_string("$(plural2) currently loaded", Base.warn_color()),
". Restart julia to access the new version$(plural3). \
@@ -1061,12 +1066,12 @@ function _precompilepkgs(pkgs::Vector{String},
let std_outputs = Tuple{PkgConfig,SubString{String}}[(pkg_config, strip(String(take!(io)))) for (pkg_config,io) in std_outputs]
filter!(kv -> !isempty(last(kv)), std_outputs)
if !isempty(std_outputs)
- plural1 = length(std_outputs) == 1 ? "y" : "ies"
- plural2 = length(std_outputs) == 1 ? "" : "s"
+ local plural1 = length(std_outputs) == 1 ? "y" : "ies"
+ local plural2 = length(std_outputs) == 1 ? "" : "s"
print(iostr, "\n ", color_string("$(length(std_outputs))", Base.warn_color()), " dependenc$(plural1) had output during precompilation:")
for (pkg_config, err) in std_outputs
pkg, config = pkg_config
- err = if pkg == pkg_liveprinted
+ err = if pkg == pkg_liveprinted[]
"[Output was shown above]"
else
join(split(err, "\n"), color_string("\n│ ", Base.warn_color()))
@@ -1078,7 +1083,7 @@ function _precompilepkgs(pkgs::Vector{String},
end
end
let str=str
- lock(print_lock) do
+ @lock print_lock begin
println(io, str)
end
end
@@ -1156,7 +1161,7 @@ function precompile_pkgs_maybe_cachefile_lock(f, io::IO, print_lock::ReentrantLo
else
"another machine (hostname: $hostname, pid: $pid, pidfile: $pidfile)"
end
- !fancyprint && lock(print_lock) do
+ !fancyprint && @lock print_lock begin
println(io, " ", pkg.name, _color_string(" Being precompiled by $(pkgspidlocked[pkg_config])", Base.info_color(), hascolor))
end
Base.release(parallel_limiter) # release so other work can be done while waiting
diff --git a/base/promotion.jl b/base/promotion.jl
index 72257f8ba5a3d..719cd2dc32b61 100644
--- a/base/promotion.jl
+++ b/base/promotion.jl
@@ -199,7 +199,7 @@ end
function typejoin_union_tuple(T::DataType)
@_foldable_meta
- p = T.parameters
+ p = T.parameters::Core.SimpleVector
lr = length(p)
if lr == 0
return Tuple{}
diff --git a/base/public.jl b/base/public.jl
index 8777a454c920a..4960a08e1ad0b 100644
--- a/base/public.jl
+++ b/base/public.jl
@@ -20,6 +20,7 @@ public
Generator,
ImmutableDict,
OneTo,
+ Pairs,
LogRange,
UUID,
@@ -51,6 +52,7 @@ public
active_project,
# Reflection and introspection
+ get_extension,
isambiguous,
isexpr,
isidentifier,
diff --git a/base/reflection.jl b/base/reflection.jl
index 78e701692a2a7..528202a9196ba 100644
--- a/base/reflection.jl
+++ b/base/reflection.jl
@@ -46,92 +46,6 @@ function code_lowered(@nospecialize(f), @nospecialize(t=Tuple); generated::Bool=
return ret
end
-# high-level, more convenient method lookup functions
-
-function visit(f, mt::Core.MethodTable)
- mt.defs !== nothing && visit(f, mt.defs)
- nothing
-end
-function visit(f, mc::Core.TypeMapLevel)
- function avisit(f, e::Memory{Any})
- for i in 2:2:length(e)
- isassigned(e, i) || continue
- ei = e[i]
- if ei isa Memory{Any}
- for j in 2:2:length(ei)
- isassigned(ei, j) || continue
- visit(f, ei[j])
- end
- else
- visit(f, ei)
- end
- end
- end
- if mc.targ !== nothing
- avisit(f, mc.targ::Memory{Any})
- end
- if mc.arg1 !== nothing
- avisit(f, mc.arg1::Memory{Any})
- end
- if mc.tname !== nothing
- avisit(f, mc.tname::Memory{Any})
- end
- if mc.name1 !== nothing
- avisit(f, mc.name1::Memory{Any})
- end
- mc.list !== nothing && visit(f, mc.list)
- mc.any !== nothing && visit(f, mc.any)
- nothing
-end
-function visit(f, d::Core.TypeMapEntry)
- while d !== nothing
- f(d.func)
- d = d.next
- end
- nothing
-end
-struct MethodSpecializations
- specializations::Union{Nothing, Core.MethodInstance, Core.SimpleVector}
-end
-"""
- specializations(m::Method) → itr
-
-Return an iterator `itr` of all compiler-generated specializations of `m`.
-"""
-specializations(m::Method) = MethodSpecializations(isdefined(m, :specializations) ? m.specializations : nothing)
-function iterate(specs::MethodSpecializations)
- s = specs.specializations
- s === nothing && return nothing
- isa(s, Core.MethodInstance) && return (s, nothing)
- return iterate(specs, 0)
-end
-iterate(specs::MethodSpecializations, ::Nothing) = nothing
-function iterate(specs::MethodSpecializations, i::Int)
- s = specs.specializations::Core.SimpleVector
- n = length(s)
- i >= n && return nothing
- item = nothing
- while i < n && item === nothing
- item = s[i+=1]
- end
- item === nothing && return nothing
- return (item, i)
-end
-length(specs::MethodSpecializations) = count(Returns(true), specs)
-
-function length(mt::Core.MethodTable)
- n = 0
- visit(mt) do m
- n += 1
- end
- return n::Int
-end
-isempty(mt::Core.MethodTable) = (mt.defs === nothing)
-
-uncompressed_ir(m::Method) = isdefined(m, :source) ? _uncompressed_ir(m) :
- isdefined(m, :generator) ? error("Method is @generated; try `code_lowered` instead.") :
- error("Code for this Method is not available.")
-
# for backwards compat
const uncompressed_ast = uncompressed_ir
const _uncompressed_ast = _uncompressed_ir
@@ -235,23 +149,22 @@ struct CodegenParams
use_jlplt::Cint
"""
- If enabled, only provably reachable code (from functions marked with `entrypoint`) is included
- in the output system image. Errors or warnings can be given for call sites too dynamic to handle.
- The option is disabled by default. (0=>disabled, 1=>safe (static errors), 2=>unsafe, 3=>unsafe plus warnings)
+ If enabled emit LLVM IR for all functions even if wouldn't be compiled
+ for some reason (i.e functions that return a constant value).
"""
- trim::Cint
+ force_emit_all::Cint
function CodegenParams(; track_allocations::Bool=true, code_coverage::Bool=true,
prefer_specsig::Bool=false,
gnu_pubnames::Bool=true, debug_info_kind::Cint = default_debug_info_kind(),
debug_info_level::Cint = Cint(JLOptions().debug_level), safepoint_on_entry::Bool=true,
- gcstack_arg::Bool=true, use_jlplt::Bool=true, trim::Cint=Cint(0))
+ gcstack_arg::Bool=true, use_jlplt::Bool=true, force_emit_all::Bool=false)
return new(
Cint(track_allocations), Cint(code_coverage),
Cint(prefer_specsig),
Cint(gnu_pubnames), debug_info_kind,
debug_info_level, Cint(safepoint_on_entry),
- Cint(gcstack_arg), Cint(use_jlplt), Cint(trim))
+ Cint(gcstack_arg), Cint(use_jlplt), Cint(force_emit_all))
end
end
@@ -1369,9 +1282,9 @@ function invokelatest_gr(gr::GlobalRef, @nospecialize args...; kwargs...)
@inline
kwargs = merge(NamedTuple(), kwargs)
if isempty(kwargs)
- return Core._call_latest(apply_gr, gr, args...)
+ return invokelatest(apply_gr, gr, args...)
end
- return Core._call_latest(apply_gr_kw, kwargs, gr, args...)
+ return invokelatest(apply_gr_kw, kwargs, gr, args...)
end
"""
diff --git a/base/refpointer.jl b/base/refpointer.jl
index 5027462eeb6b6..c5968934aa748 100644
--- a/base/refpointer.jl
+++ b/base/refpointer.jl
@@ -1,5 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
+import Core: Ref
+
"""
Ref{T}
diff --git a/base/rounding.jl b/base/rounding.jl
index 98b4c30822245..5865c9aef3b5f 100644
--- a/base/rounding.jl
+++ b/base/rounding.jl
@@ -2,7 +2,7 @@
module Rounding
-let fenv_consts = Vector{Cint}(undef, 9)
+let fenv_consts = Array{Cint,1}(undef, 9)
ccall(:jl_get_fenv_consts, Cvoid, (Ptr{Cint},), fenv_consts)
global const JL_FE_INEXACT = fenv_consts[1]
global const JL_FE_UNDERFLOW = fenv_consts[2]
diff --git a/base/runtime_internals.jl b/base/runtime_internals.jl
index b61e24c11f3f9..b6190b3e9044e 100644
--- a/base/runtime_internals.jl
+++ b/base/runtime_internals.jl
@@ -177,28 +177,6 @@ ispublic(m::Module, s::Symbol) = ccall(:jl_module_public_p, Cint, (Any, Any), m,
# `Base.deprecate`, not the @deprecated macro:
isdeprecated(m::Module, s::Symbol) = ccall(:jl_is_binding_deprecated, Cint, (Any, Any), m, s) != 0
-"""
- isbindingresolved(m::Module, s::Symbol) -> Bool
-
-Returns whether the binding of a symbol in a module is resolved.
-
-See also: [`isexported`](@ref), [`ispublic`](@ref), [`isdeprecated`](@ref)
-
-```jldoctest
-julia> module Mod
- foo() = 17
- end
-Mod
-
-julia> Base.isbindingresolved(Mod, :foo)
-true
-
-julia> Base.isbindingresolved(Mod, :bar)
-false
-```
-"""
-isbindingresolved(m::Module, var::Symbol) = ccall(:jl_binding_resolved_p, Cint, (Any, Any), m, var) != 0
-
function binding_module(m::Module, s::Symbol)
p = ccall(:jl_get_module_of_binding, Ptr{Cvoid}, (Any, Any), m, s)
p == C_NULL && return m
@@ -219,27 +197,44 @@ function _fieldnames(@nospecialize t)
end
# N.B.: Needs to be synced with julia.h
-const BINDING_KIND_CONST = 0x0
-const BINDING_KIND_CONST_IMPORT = 0x1
-const BINDING_KIND_GLOBAL = 0x2
-const BINDING_KIND_IMPLICIT = 0x3
-const BINDING_KIND_EXPLICIT = 0x4
-const BINDING_KIND_IMPORTED = 0x5
-const BINDING_KIND_FAILED = 0x6
-const BINDING_KIND_DECLARED = 0x7
-const BINDING_KIND_GUARD = 0x8
-const BINDING_KIND_UNDEF_CONST = 0x9
-const BINDING_KIND_BACKDATED_CONST = 0xa
-
-is_defined_const_binding(kind::UInt8) = (kind == BINDING_KIND_CONST || kind == BINDING_KIND_CONST_IMPORT || kind == BINDING_KIND_BACKDATED_CONST)
-is_some_const_binding(kind::UInt8) = (is_defined_const_binding(kind) || kind == BINDING_KIND_UNDEF_CONST)
-is_some_imported(kind::UInt8) = (kind == BINDING_KIND_IMPLICIT || kind == BINDING_KIND_EXPLICIT || kind == BINDING_KIND_IMPORTED)
-is_some_guard(kind::UInt8) = (kind == BINDING_KIND_GUARD || kind == BINDING_KIND_DECLARED || kind == BINDING_KIND_FAILED || kind == BINDING_KIND_UNDEF_CONST)
+const PARTITION_KIND_CONST = 0x0
+const PARTITION_KIND_CONST_IMPORT = 0x1
+const PARTITION_KIND_GLOBAL = 0x2
+const PARTITION_KIND_IMPLICIT_GLOBAL = 0x3
+const PARTITION_KIND_IMPLICIT_CONST = 0x4
+const PARTITION_KIND_EXPLICIT = 0x5
+const PARTITION_KIND_IMPORTED = 0x6
+const PARTITION_KIND_FAILED = 0x7
+const PARTITION_KIND_DECLARED = 0x8
+const PARTITION_KIND_GUARD = 0x9
+const PARTITION_KIND_UNDEF_CONST = 0xa
+const PARTITION_KIND_BACKDATED_CONST = 0xb
+
+const PARTITION_FLAG_EXPORTED = 0x10
+const PARTITION_FLAG_DEPRECATED = 0x20
+const PARTITION_FLAG_DEPWARN = 0x40
+
+const PARTITION_MASK_KIND = 0x0f
+const PARTITION_MASK_FLAG = 0xf0
+
+const BINDING_FLAG_ANY_IMPLICIT_EDGES = 0x8
+
+is_defined_const_binding(kind::UInt8) = (kind == PARTITION_KIND_CONST || kind == PARTITION_KIND_CONST_IMPORT || kind == PARTITION_KIND_IMPLICIT_CONST || kind == PARTITION_KIND_BACKDATED_CONST)
+is_some_const_binding(kind::UInt8) = (is_defined_const_binding(kind) || kind == PARTITION_KIND_UNDEF_CONST)
+is_some_imported(kind::UInt8) = (kind == PARTITION_KIND_IMPLICIT_GLOBAL || kind == PARTITION_KIND_IMPLICIT_CONST || kind == PARTITION_KIND_EXPLICIT || kind == PARTITION_KIND_IMPORTED)
+is_some_implicit(kind::UInt8) = (kind == PARTITION_KIND_IMPLICIT_GLOBAL || kind == PARTITION_KIND_IMPLICIT_CONST || kind == PARTITION_KIND_GUARD || kind == PARTITION_KIND_FAILED)
+is_some_explicit_imported(kind::UInt8) = (kind == PARTITION_KIND_EXPLICIT || kind == PARTITION_KIND_IMPORTED)
+is_some_binding_imported(kind::UInt8) = is_some_explicit_imported(kind) || kind == PARTITION_KIND_IMPLICIT_GLOBAL
+is_some_guard(kind::UInt8) = (kind == PARTITION_KIND_GUARD || kind == PARTITION_KIND_FAILED || kind == PARTITION_KIND_UNDEF_CONST)
function lookup_binding_partition(world::UInt, b::Core.Binding)
ccall(:jl_get_binding_partition, Ref{Core.BindingPartition}, (Any, UInt), b, world)
end
+function lookup_binding_partition(world::UInt, b::Core.Binding, previous_partition::Core.BindingPartition)
+ ccall(:jl_get_binding_partition_with_hint, Ref{Core.BindingPartition}, (Any, Any, UInt), b, previous_partition, world)
+end
+
function convert(::Type{Core.Binding}, gr::Core.GlobalRef)
if isdefined(gr, :binding)
return gr.binding
@@ -412,8 +407,13 @@ parentmodule(t::UnionAll) = parentmodule(unwrap_unionall(t))
"""
isconst(m::Module, s::Symbol) -> Bool
+ isconst(g::GlobalRef)
-Determine whether a global is declared `const` in a given module `m`.
+Determine whether a global is `const` in a given module `m`, either
+because it was declared constant or because it was imported from a
+constant binding. Note that constant-ness is specific to a particular
+world age, so the result of this function may not be assumed to hold
+after a world age update.
"""
isconst(m::Module, s::Symbol) =
ccall(:jl_is_const, Cint, (Any, Any), m, s) != 0
@@ -1115,7 +1115,7 @@ function datatype_fieldcount(t::DataType)
return length(names)
end
if types isa DataType && types <: Tuple
- return fieldcount(types)
+ return datatype_fieldcount(types)
end
return nothing
elseif isabstracttype(t)
@@ -1325,6 +1325,24 @@ function MethodList(mt::Core.MethodTable)
return MethodList(ms, mt)
end
+function matches_to_methods(ms::Array{Any,1}, mt::Core.MethodTable, mod)
+ # Lack of specialization => a comprehension triggers too many invalidations via _collect, so collect the methods manually
+ ms = Method[(ms[i]::Core.MethodMatch).method for i in 1:length(ms)]
+ # Remove shadowed methods with identical type signatures
+ prev = nothing
+ filter!(ms) do m
+ l = prev
+ repeated = (l isa Method && m.sig == l.sig)
+ prev = m
+ return !repeated
+ end
+ # Remove methods not part of module (after removing shadowed methods)
+ mod === nothing || filter!(ms) do m
+ return parentmodule(m) ∈ mod
+ end
+ return MethodList(ms, mt)
+end
+
"""
methods(f, [types], [module])
@@ -1332,7 +1350,7 @@ Return the method table for `f`.
If `types` is specified, return an array of methods whose types match.
If `module` is specified, return an array of methods defined in that module.
-A list of modules can also be specified as an array.
+A list of modules can also be specified as an array or set.
!!! compat "Julia 1.4"
At least Julia 1.4 is required for specifying a module.
@@ -1340,16 +1358,11 @@ A list of modules can also be specified as an array.
See also: [`which`](@ref), [`@which`](@ref Main.InteractiveUtils.@which) and [`methodswith`](@ref Main.InteractiveUtils.methodswith).
"""
function methods(@nospecialize(f), @nospecialize(t),
- mod::Union{Tuple{Module},AbstractArray{Module},Nothing}=nothing)
+ mod::Union{Tuple{Module},AbstractArray{Module},AbstractSet{Module},Nothing}=nothing)
world = get_world_counter()
world == typemax(UInt) && error("code reflection cannot be used from generated functions")
- # Lack of specialization => a comprehension triggers too many invalidations via _collect, so collect the methods manually
- ms = Method[]
- for m in _methods(f, t, -1, world)::Vector
- m = m::Core.MethodMatch
- (mod === nothing || parentmodule(m.method) ∈ mod) && push!(ms, m.method)
- end
- MethodList(ms, typeof(f).name.mt)
+ ms = _methods(f, t, -1, world)::Vector{Any}
+ return matches_to_methods(ms, typeof(f).name.mt, mod)
end
methods(@nospecialize(f), @nospecialize(t), mod::Module) = methods(f, t, (mod,))
@@ -1359,12 +1372,12 @@ function methods_including_ambiguous(@nospecialize(f), @nospecialize(t))
world == typemax(UInt) && error("code reflection cannot be used from generated functions")
min = RefValue{UInt}(typemin(UInt))
max = RefValue{UInt}(typemax(UInt))
- ms = _methods_by_ftype(tt, nothing, -1, world, true, min, max, Ptr{Int32}(C_NULL))::Vector
- return MethodList(Method[(m::Core.MethodMatch).method for m in ms], typeof(f).name.mt)
+ ms = _methods_by_ftype(tt, nothing, -1, world, true, min, max, Ptr{Int32}(C_NULL))::Vector{Any}
+ return matches_to_methods(ms, typeof(f).name.mt, nothing)
end
function methods(@nospecialize(f),
- mod::Union{Module,AbstractArray{Module},Nothing}=nothing)
+ mod::Union{Module,AbstractArray{Module},AbstractSet{Module},Nothing}=nothing)
# return all matches
return methods(f, Tuple{Vararg{Any}}, mod)
end
@@ -1557,12 +1570,18 @@ end
is_nospecialized(method::Method) = method.nospecialize ≠ 0
is_nospecializeinfer(method::Method) = method.nospecializeinfer && is_nospecialized(method)
+
+"""
+Return MethodInstance corresponding to `atype` and `sparams`.
+
+No widening / narrowing / compileable-normalization of `atype` is performed.
+"""
function specialize_method(method::Method, @nospecialize(atype), sparams::SimpleVector; preexisting::Bool=false)
@inline
if isa(atype, UnionAll)
atype, sparams = normalize_typevars(method, atype, sparams)
end
- if is_nospecializeinfer(method)
+ if is_nospecializeinfer(method) # TODO: this shouldn't be here
atype = get_nospecializeinfer_sig(method, atype, sparams)
end
if preexisting
@@ -1584,3 +1603,92 @@ hasintersect(@nospecialize(a), @nospecialize(b)) = typeintersect(a, b) !== Botto
###########
_topmod(m::Module) = ccall(:jl_base_relative_to, Any, (Any,), m)::Module
+
+
+# high-level, more convenient method lookup functions
+
+function visit(f, mt::Core.MethodTable)
+ mt.defs !== nothing && visit(f, mt.defs)
+ nothing
+end
+function visit(f, mc::Core.TypeMapLevel)
+ function avisit(f, e::Memory{Any})
+ for i in 2:2:length(e)
+ isassigned(e, i) || continue
+ ei = e[i]
+ if ei isa Memory{Any}
+ for j in 2:2:length(ei)
+ isassigned(ei, j) || continue
+ visit(f, ei[j])
+ end
+ else
+ visit(f, ei)
+ end
+ end
+ end
+ if mc.targ !== nothing
+ avisit(f, mc.targ::Memory{Any})
+ end
+ if mc.arg1 !== nothing
+ avisit(f, mc.arg1::Memory{Any})
+ end
+ if mc.tname !== nothing
+ avisit(f, mc.tname::Memory{Any})
+ end
+ if mc.name1 !== nothing
+ avisit(f, mc.name1::Memory{Any})
+ end
+ mc.list !== nothing && visit(f, mc.list)
+ mc.any !== nothing && visit(f, mc.any)
+ nothing
+end
+function visit(f, d::Core.TypeMapEntry)
+ while d !== nothing
+ f(d.func)
+ d = d.next
+ end
+ nothing
+end
+struct MethodSpecializations
+ specializations::Union{Nothing, Core.MethodInstance, Core.SimpleVector}
+end
+"""
+ specializations(m::Method) → itr
+
+Return an iterator `itr` of all compiler-generated specializations of `m`.
+"""
+specializations(m::Method) = MethodSpecializations(isdefined(m, :specializations) ? m.specializations : nothing)
+function iterate(specs::MethodSpecializations)
+ s = specs.specializations
+ s === nothing && return nothing
+ isa(s, Core.MethodInstance) && return (s, nothing)
+ return iterate(specs, 0)
+end
+iterate(specs::MethodSpecializations, ::Nothing) = nothing
+function iterate(specs::MethodSpecializations, i::Int)
+ s = specs.specializations::Core.SimpleVector
+ n = length(s)
+ i >= n && return nothing
+ item = nothing
+ while i < n && item === nothing
+ item = s[i+=1]
+ end
+ item === nothing && return nothing
+ return (item, i)
+end
+length(specs::MethodSpecializations) = count(Returns(true), specs)
+
+function length(mt::Core.MethodTable)
+ n = 0
+ visit(mt) do m
+ n += 1
+ end
+ return n::Int
+end
+isempty(mt::Core.MethodTable) = (mt.defs === nothing)
+
+uncompressed_ir(m::Method) = isdefined(m, :source) ? _uncompressed_ir(m) :
+ isdefined(m, :generator) ? error("Method is @generated; try `code_lowered` instead.") :
+ error("Code for this Method is not available.")
+
+has_image_globalref(m::Method) = ccall(:jl_ir_flag_has_image_globalref, Bool, (Any,), m.source)
diff --git a/base/shell.jl b/base/shell.jl
index e07fff128acfe..68925cbd5d5af 100644
--- a/base/shell.jl
+++ b/base/shell.jl
@@ -344,7 +344,7 @@ function shell_escape_csh(io::IO, args::AbstractString...)
end
shell_escape_csh(args::AbstractString...) =
sprint(shell_escape_csh, args...;
- sizehint = sum(sizeof.(args)) + length(args) * 3)
+ sizehint = sum(sizeof, args) + length(args) * 3)
"""
shell_escape_wincmd(s::AbstractString)
@@ -494,4 +494,4 @@ function escape_microsoft_c_args(io::IO, args::AbstractString...)
end
escape_microsoft_c_args(args::AbstractString...) =
sprint(escape_microsoft_c_args, args...;
- sizehint = (sum(sizeof.(args)) + 3*length(args)))
+ sizehint = (sum(sizeof, args) + 3*length(args)))
diff --git a/base/show.jl b/base/show.jl
index 42788f05eceb5..3453c29956d59 100644
--- a/base/show.jl
+++ b/base/show.jl
@@ -618,7 +618,7 @@ function make_typealias(@nospecialize(x::Type))
Any === x && return nothing
x <: Tuple && return nothing
mods = modulesof!(Set{Module}(), x)
- Core in mods && push!(mods, Base)
+ replace!(mods, Core=>Base)
aliases = Tuple{GlobalRef,SimpleVector}[]
xenv = UnionAll[]
for p in uniontypes(unwrap_unionall(x))
@@ -1021,18 +1021,30 @@ end
# If an object with this name exists in 'from', we need to check that it's the same binding
# and that it's not deprecated.
function isvisible(sym::Symbol, parent::Module, from::Module)
- owner = ccall(:jl_binding_owner, Ptr{Cvoid}, (Any, Any), parent, sym)
- from_owner = ccall(:jl_binding_owner, Ptr{Cvoid}, (Any, Any), from, sym)
- return owner !== C_NULL && from_owner === owner &&
- !isdeprecated(parent, sym) &&
- isdefinedglobal(from, sym) # if we're going to return true, force binding resolution
+ isdeprecated(parent, sym) && return false
+ isdefinedglobal(from, sym) || return false
+ isdefinedglobal(parent, sym) || return false
+ parent_binding = convert(Core.Binding, GlobalRef(parent, sym))
+ from_binding = convert(Core.Binding, GlobalRef(from, sym))
+ while true
+ from_binding === parent_binding && return true
+ partition = lookup_binding_partition(tls_world_age(), from_binding)
+ is_some_explicit_imported(binding_kind(partition)) || break
+ from_binding = partition_restriction(partition)::Core.Binding
+ end
+ parent_partition = lookup_binding_partition(tls_world_age(), parent_binding)
+ from_partition = lookup_binding_partition(tls_world_age(), from_binding)
+ if is_defined_const_binding(binding_kind(parent_partition)) && is_defined_const_binding(binding_kind(from_partition))
+ return parent_partition.restriction === from_partition.restriction
+ end
+ return false
end
function is_global_function(tn::Core.TypeName, globname::Union{Symbol,Nothing})
if globname !== nothing
globname_str = string(globname::Symbol)
if ('#' ∉ globname_str && '@' ∉ globname_str && isdefined(tn, :module) &&
- isbindingresolved(tn.module, globname) && isdefinedglobal(tn.module, globname) &&
+ isdefinedglobal(tn.module, globname) &&
isconcretetype(tn.wrapper) && isa(getglobal(tn.module, globname), tn.wrapper))
return true
end
@@ -1045,10 +1057,13 @@ function check_world_bounded(tn::Core.TypeName)
isdefined(bnd, :partitions) || return nothing
partition = @atomic bnd.partitions
while true
- if is_defined_const_binding(binding_kind(partition)) && partition_restriction(partition) <: tn.wrapper
- max_world = @atomic partition.max_world
- max_world == typemax(UInt) && return nothing
- return Int(partition.min_world):Int(max_world)
+ if is_defined_const_binding(binding_kind(partition))
+ cval = partition_restriction(partition)
+ if isa(cval, Type) && cval <: tn.wrapper
+ max_world = @atomic partition.max_world
+ max_world == typemax(UInt) && return nothing
+ return Int(partition.min_world):Int(max_world)
+ end
end
isdefined(partition, :next) || return nothing
partition = @atomic partition.next
@@ -2828,7 +2843,6 @@ function show(io::IO, vm::Core.TypeofVararg)
end
Compiler.load_irshow!()
-const IRShow = Compiler.IRShow # an alias for compatibility
function show(io::IO, src::CodeInfo; debuginfo::Symbol=:source)
# Fix slot names and types in function body
@@ -3362,33 +3376,55 @@ function print_partition(io::IO, partition::Core.BindingPartition)
else
print(io, max_world)
end
+ if (partition.kind & PARTITION_MASK_FLAG) != 0
+ first = false
+ print(io, " [")
+ if (partition.kind & PARTITION_FLAG_EXPORTED) != 0
+ print(io, "exported")
+ end
+ if (partition.kind & PARTITION_FLAG_DEPRECATED) != 0
+ first ? (first = false) : print(io, ",")
+ print(io, "deprecated")
+ end
+ if (partition.kind & PARTITION_FLAG_DEPWARN) != 0
+ first ? (first = false) : print(io, ",")
+ print(io, "depwarn")
+ end
+ print(io, "]")
+ end
print(io, " - ")
kind = binding_kind(partition)
- if kind == BINDING_KIND_BACKDATED_CONST
+ if kind == PARTITION_KIND_BACKDATED_CONST
print(io, "backdated constant binding to ")
print(io, partition_restriction(partition))
- elseif is_defined_const_binding(kind)
+ elseif kind == PARTITION_KIND_CONST
print(io, "constant binding to ")
print(io, partition_restriction(partition))
- elseif kind == BINDING_KIND_UNDEF_CONST
+ elseif kind == PARTITION_KIND_CONST_IMPORT
+ print(io, "constant binding (declared with `import`) to ")
+ print(io, partition_restriction(partition))
+ elseif kind == PARTITION_KIND_UNDEF_CONST
print(io, "undefined const binding")
- elseif kind == BINDING_KIND_GUARD
+ elseif kind == PARTITION_KIND_GUARD
print(io, "undefined binding - guard entry")
- elseif kind == BINDING_KIND_FAILED
+ elseif kind == PARTITION_KIND_FAILED
print(io, "ambiguous binding - guard entry")
- elseif kind == BINDING_KIND_DECLARED
- print(io, "undefined, but declared using `global` - guard entry")
- elseif kind == BINDING_KIND_IMPLICIT
- print(io, "implicit `using` from ")
+ elseif kind == PARTITION_KIND_DECLARED
+ print(io, "weak global binding declared using `global` (implicit type Any)")
+ elseif kind == PARTITION_KIND_IMPLICIT_GLOBAL
+ print(io, "implicit `using` resolved to global ")
+ print(io, partition_restriction(partition).globalref)
+ elseif kind == PARTITION_KIND_IMPLICIT_CONST
+ print(io, "implicit `using` resolved to constant ")
print(io, partition_restriction(partition))
- elseif kind == BINDING_KIND_EXPLICIT
+ elseif kind == PARTITION_KIND_EXPLICIT
print(io, "explicit `using` from ")
- print(io, partition_restriction(partition))
- elseif kind == BINDING_KIND_IMPORTED
+ print(io, partition_restriction(partition).globalref)
+ elseif kind == PARTITION_KIND_IMPORTED
print(io, "explicit `import` from ")
- print(io, partition_restriction(partition))
+ print(io, partition_restriction(partition).globalref)
else
- @assert kind == BINDING_KIND_GLOBAL
+ @assert kind == PARTITION_KIND_GLOBAL
print(io, "global variable with type ")
print(io, partition_restriction(partition))
end
@@ -3403,7 +3439,7 @@ function show(io::IO, ::MIME"text/plain", bnd::Core.Binding)
print(io, "Binding ")
print(io, bnd.globalref)
if !isdefined(bnd, :partitions)
- print(io, "No partitions")
+ print(io, " - No partitions")
else
partition = @atomic bnd.partitions
while true
diff --git a/base/stacktraces.jl b/base/stacktraces.jl
index 01e8a3cf62e72..806c9468efed4 100644
--- a/base/stacktraces.jl
+++ b/base/stacktraces.jl
@@ -7,8 +7,9 @@ module StackTraces
import Base: hash, ==, show
-import Core: CodeInfo, MethodInstance, CodeInstance
-using Base.IRShow: normalize_method_name, append_scopes!, LineInfoNode
+
+using Core: CodeInfo, MethodInstance, CodeInstance
+using Base.IRShow
export StackTrace, StackFrame, stacktrace
@@ -112,7 +113,7 @@ Base.@constprop :none function lookup(pointer::Ptr{Cvoid})
res = Vector{StackFrame}(undef, length(infos))
for i in 1:length(infos)
info = infos[i]::Core.SimpleVector
- @assert(length(info) == 6)
+ @assert length(info) == 6 "corrupt return from jl_lookup_code_address"
func = info[1]::Symbol
file = info[2]::Symbol
linenum = info[3]::Int
@@ -158,8 +159,8 @@ function lookup(ip::Base.InterpreterIP)
end
def = (code isa CodeInfo ? StackTraces : code) # Module just used as a token for top-level code
pc::Int = max(ip.stmt + 1, 0) # n.b. ip.stmt is 0-indexed
- scopes = LineInfoNode[]
- append_scopes!(scopes, pc, codeinfo.debuginfo, def)
+ scopes = IRShow.LineInfoNode[]
+ IRShow.append_scopes!(scopes, pc, codeinfo.debuginfo, def)
if isempty(scopes)
return [StackFrame(func, file, line, code, false, false, 0)]
end
@@ -171,7 +172,7 @@ function lookup(ip::Base.InterpreterIP)
else
def = codeinfo
end
- sf = StackFrame(normalize_method_name(lno.method), lno.file, lno.line, def, false, inlined, 0)
+ sf = StackFrame(IRShow.normalize_method_name(lno.method), lno.file, lno.line, def, false, inlined, 0)
inlined = true
return sf
end
diff --git a/base/stat.jl b/base/stat.jl
index fc2ac9a04b0bf..4f248ec47e6da 100644
--- a/base/stat.jl
+++ b/base/stat.jl
@@ -183,7 +183,8 @@ show(io::IO, ::MIME"text/plain", st::StatStruct) = show_statstruct(io, st, false
# stat & lstat functions
-checkstat(s::StatStruct) = Int(s.ioerrno) in (0, Base.UV_ENOENT, Base.UV_ENOTDIR, Base.UV_EINVAL) ? s : uv_error(string("stat(", repr(s.desc), ")"), s.ioerrno)
+checkstat(s::StatStruct) = Int(s.ioerrno) in (0, Base.UV_ENOENT, Base.UV_ENOTDIR, Base.UV_EINVAL) ? s :
+ _uv_error(string("stat(", repr(s.desc), ")"), s.ioerrno)
macro stat_call(sym, arg1type, arg)
return quote
diff --git a/base/staticdata.jl b/base/staticdata.jl
index a4a40b9af0a7c..0b65d97750194 100644
--- a/base/staticdata.jl
+++ b/base/staticdata.jl
@@ -2,8 +2,8 @@
module StaticData
-using Core: CodeInstance, MethodInstance
-using Base: get_world_counter
+using .Core: CodeInstance, MethodInstance
+using .Base: JLOptions, Compiler, get_world_counter, _methods_by_ftype, get_methodtable
const WORLD_AGE_REVALIDATION_SENTINEL::UInt = 1
const _jl_debug_method_invalidation = Ref{Union{Nothing,Vector{Any}}}(nothing)
@@ -25,7 +25,8 @@ end
function insert_backedges(edges::Vector{Any}, ext_ci_list::Union{Nothing,Vector{Any}}, extext_methods::Vector{Any}, internal_methods::Vector{Any})
# determine which CodeInstance objects are still valid in our image
# to enable any applicable new codes
- methods_with_invalidated_source = Base.scan_new_methods(extext_methods, internal_methods)
+ backedges_only = unsafe_load(cglobal(:jl_first_image_replacement_world, UInt)) == typemax(UInt)
+ methods_with_invalidated_source = Base.scan_new_methods(extext_methods, internal_methods, backedges_only)
stack = CodeInstance[]
visiting = IdDict{CodeInstance,Int}()
_insert_backedges(edges, stack, visiting, methods_with_invalidated_source)
@@ -37,9 +38,16 @@ end
function _insert_backedges(edges::Vector{Any}, stack::Vector{CodeInstance}, visiting::IdDict{CodeInstance,Int}, mwis::IdSet{Method}, external::Bool=false)
for i = 1:length(edges)
codeinst = edges[i]::CodeInstance
- verify_method_graph(codeinst, stack, visiting, mwis)
+ validation_world = get_world_counter()
+ verify_method_graph(codeinst, stack, visiting, mwis, validation_world)
+ # After validation, under the world_counter_lock, set max_world to typemax(UInt) for all dependencies
+ # (recursively). From that point onward the ordinary backedge mechanism is responsible for maintaining
+ # validity.
+ @ccall jl_promote_ci_to_current(codeinst::Any, validation_world::UInt)::Cvoid
minvalid = codeinst.min_world
maxvalid = codeinst.max_world
+ # Finally, if this CI is still valid in some world age and belongs to an external method (specialization),
+ # poke it into that mi's cache
if maxvalid ≥ minvalid && external
caller = get_ci_mi(codeinst)
@assert isdefined(codeinst, :inferred) # See #53586, #53109
@@ -55,29 +63,86 @@ function _insert_backedges(edges::Vector{Any}, stack::Vector{CodeInstance}, visi
end
end
-function verify_method_graph(codeinst::CodeInstance, stack::Vector{CodeInstance}, visiting::IdDict{CodeInstance,Int}, mwis::IdSet{Method})
+function verify_method_graph(codeinst::CodeInstance, stack::Vector{CodeInstance}, visiting::IdDict{CodeInstance,Int}, mwis::IdSet{Method}, validation_world::UInt)
@assert isempty(stack); @assert isempty(visiting);
- child_cycle, minworld, maxworld = verify_method(codeinst, stack, visiting, mwis)
+ child_cycle, minworld, maxworld = verify_method(codeinst, stack, visiting, mwis, validation_world)
@assert child_cycle == 0
@assert isempty(stack); @assert isempty(visiting);
nothing
end
+get_require_world() = unsafe_load(cglobal(:jl_require_world, UInt))
+
+function gen_staged_sig(def::Method, mi::MethodInstance)
+ isdefined(def, :generator) || return nothing
+ isdispatchtuple(mi.specTypes) || return nothing
+ gen = Core.Typeof(def.generator)
+ return Tuple{gen, UInt, Method, Vararg}
+ ## more precise method lookup, but more costly and likely not actually better?
+ #tts = (mi.specTypes::DataType).parameters
+ #sps = Any[Core.Typeof(mi.sparam_vals[i]) for i in 1:length(mi.sparam_vals)]
+ #if def.isva
+ # return Tuple{gen, UInt, Method, sps..., tts[1:def.nargs - 1]..., Tuple{tts[def.nargs - 1:end]...}}
+ #else
+ # return Tuple{gen, UInt, Method, sps..., tts...}
+ #end
+end
+
+function needs_instrumentation(codeinst::CodeInstance, mi::MethodInstance, def::Method, validation_world::UInt)
+ if JLOptions().code_coverage != 0 || JLOptions().malloc_log != 0
+ # test if the code needs to run with instrumentation, in which case we cannot use existing generated code
+ if isdefined(def, :debuginfo) ? # generated_only functions do not have debuginfo, so fall back to considering their codeinst debuginfo though this may be slower (and less accurate?)
+ Compiler.should_instrument(def.module, def.debuginfo) :
+ Compiler.should_instrument(def.module, codeinst.debuginfo)
+ return true
+ end
+ gensig = gen_staged_sig(def, mi)
+ if gensig !== nothing
+ # if this is defined by a generator, try to consider forcing re-running the generators too, to add coverage for them
+ minworld = Ref{UInt}(1)
+ maxworld = Ref{UInt}(typemax(UInt))
+ has_ambig = Ref{Int32}(0)
+ result = _methods_by_ftype(gensig, nothing, -1, validation_world, #=ambig=#false, minworld, maxworld, has_ambig)
+ if result !== nothing
+ for k = 1:length(result)
+ match = result[k]::Core.MethodMatch
+ genmethod = match.method
+ # no, I refuse to refuse to recurse into your cursed generated function generators and will only test one level deep here
+ if isdefined(genmethod, :debuginfo) && Compiler.should_instrument(genmethod.module, genmethod.debuginfo)
+ return true
+ end
+ end
+ end
+ end
+ end
+ return false
+end
+
# Test all edges relevant to a method:
# - Visit the entire call graph, starting from edges[idx] to determine if that method is valid
# - Implements Tarjan's SCC (strongly connected components) algorithm, simplified to remove the count variable
# and slightly modified with an early termination option once the computation reaches its minimum
-function verify_method(codeinst::CodeInstance, stack::Vector{CodeInstance}, visiting::IdDict{CodeInstance,Int}, mwis::IdSet{Method})
+function verify_method(codeinst::CodeInstance, stack::Vector{CodeInstance}, visiting::IdDict{CodeInstance,Int}, mwis::IdSet{Method}, validation_world::UInt)
world = codeinst.min_world
let max_valid2 = codeinst.max_world
if max_valid2 ≠ WORLD_AGE_REVALIDATION_SENTINEL
return 0, world, max_valid2
end
end
- current_world = get_world_counter()
- local minworld::UInt, maxworld::UInt = 1, current_world
- def = get_ci_mi(codeinst).def
- @assert def isa Method
+ mi = get_ci_mi(codeinst)
+ def = mi.def::Method
+ if needs_instrumentation(codeinst, mi, def, validation_world)
+ return 0, world, UInt(0)
+ end
+
+ # Implicitly referenced bindings in the current module do not get explicit edges.
+ # If they were invalidated, they'll be in `mwis`. If they weren't, they imply a minworld
+ # of `get_require_world`. In principle, this is only required for methods that do reference
+ # an implicit globalref. However, we already don't perform this validation for methods that
+ # don't have any (implicit or explicit) edges at all. The remaining corner case (some explicit,
+ # but no implicit edges) is rare and there would be little benefit to lower the minworld for it
+ # in any case, so we just always use `get_require_world` here.
+ local minworld::UInt, maxworld::UInt = get_require_world(), validation_world
if haskey(visiting, codeinst)
return visiting[codeinst], minworld, maxworld
end
@@ -97,9 +162,8 @@ function verify_method(codeinst::CodeInstance, stack::Vector{CodeInstance}, visi
# verify current edges
if isempty(callees)
# quick return: no edges to verify (though we probably shouldn't have gotten here from WORLD_AGE_REVALIDATION_SENTINEL)
- elseif maxworld == unsafe_load(cglobal(:jl_require_world, UInt))
+ elseif maxworld == get_require_world()
# if no new worlds were allocated since serializing the base module, then no new validation is worth doing right now either
- minworld = maxworld
else
j = 1
while j ≤ length(callees)
@@ -110,7 +174,7 @@ function verify_method(codeinst::CodeInstance, stack::Vector{CodeInstance}, visi
edge = get_ci_mi(edge)
end
if edge isa MethodInstance
- sig = typeintersect((edge.def::Method).sig, edge.specTypes) # TODO??
+ sig = edge.specTypes
min_valid2, max_valid2, matches = verify_call(sig, callees, j, 1, world)
j += 1
elseif edge isa Int
@@ -147,8 +211,7 @@ function verify_method(codeinst::CodeInstance, stack::Vector{CodeInstance}, visi
else
meth = callee::Method
end
- min_valid2, max_valid2 = verify_invokesig(edge, meth, world)
- matches = nothing
+ min_valid2, max_valid2, matches = verify_invokesig(edge, meth, world)
j += 2
end
if minworld < min_valid2
@@ -177,7 +240,7 @@ function verify_method(codeinst::CodeInstance, stack::Vector{CodeInstance}, visi
end
callee = edge
local min_valid2::UInt, max_valid2::UInt
- child_cycle, min_valid2, max_valid2 = verify_method(callee, stack, visiting, mwis)
+ child_cycle, min_valid2, max_valid2 = verify_method(callee, stack, visiting, mwis, validation_world)
if minworld < min_valid2
minworld = min_valid2
end
@@ -209,16 +272,14 @@ function verify_method(codeinst::CodeInstance, stack::Vector{CodeInstance}, visi
if maxworld ≠ 0
@atomic :monotonic child.min_world = minworld
end
- if maxworld == current_world
- Base.Compiler.store_backedges(child, child.edges)
- @atomic :monotonic child.max_world = typemax(UInt)
- else
- @atomic :monotonic child.max_world = maxworld
+ @atomic :monotonic child.max_world = maxworld
+ if maxworld == validation_world && validation_world == get_world_counter()
+ Compiler.store_backedges(child, child.edges)
end
@assert visiting[child] == length(stack) + 1
delete!(visiting, child)
invalidations = _jl_debug_method_invalidation[]
- if invalidations !== nothing && maxworld < current_world
+ if invalidations !== nothing && maxworld < validation_world
push!(invalidations, child, "verify_methods", cause)
end
end
@@ -227,11 +288,35 @@ end
function verify_call(@nospecialize(sig), expecteds::Core.SimpleVector, i::Int, n::Int, world::UInt)
# verify that these edges intersect with the same methods as before
+ if n == 1
+ # first, fast-path a check if the expected method simply dominates its sig anyways
+ # so the result of ml_matches is already simply known
+ let t = expecteds[i], meth, minworld, maxworld, result
+ if t isa Method
+ meth = t
+ else
+ if t isa CodeInstance
+ t = get_ci_mi(t)
+ else
+ t = t::MethodInstance
+ end
+ meth = t.def::Method
+ end
+ if !iszero(meth.dispatch_status & METHOD_SIG_LATEST_ONLY)
+ minworld = meth.primary_world
+ @assert minworld ≤ world
+ maxworld = typemax(UInt)
+ result = Any[] # result is unused
+ return minworld, maxworld, result
+ end
+ end
+ end
+ # next, compare the current result of ml_matches to the old result
lim = _jl_debug_method_invalidation[] !== nothing ? Int(typemax(Int32)) : n
minworld = Ref{UInt}(1)
maxworld = Ref{UInt}(typemax(UInt))
has_ambig = Ref{Int32}(0)
- result = Base._methods_by_ftype(sig, nothing, lim, world, #=ambig=#false, minworld, maxworld, has_ambig)
+ result = _methods_by_ftype(sig, nothing, lim, world, #=ambig=#false, minworld, maxworld, has_ambig)
if result === nothing
maxworld[] = 0
else
@@ -279,34 +364,40 @@ function verify_call(@nospecialize(sig), expecteds::Core.SimpleVector, i::Int, n
return minworld[], maxworld[], result
end
+# fast-path dispatch_status bit definitions (false indicates unknown)
+# true indicates this method would be returned as the result from `which` when invoking `method.sig` in the current latest world
+const METHOD_SIG_LATEST_WHICH = 0x1
+# true indicates this method would be returned as the only result from `methods` when calling `method.sig` in the current latest world
+const METHOD_SIG_LATEST_ONLY = 0x2
+
function verify_invokesig(@nospecialize(invokesig), expected::Method, world::UInt)
@assert invokesig isa Type
local minworld::UInt, maxworld::UInt
- if invokesig === expected.sig
- # the invoke match is `expected` for `expected->sig`, unless `expected` is invalid
+ matched = nothing
+ if invokesig === expected.sig && !iszero(expected.dispatch_status & METHOD_SIG_LATEST_WHICH)
+ # the invoke match is `expected` for `expected->sig`, unless `expected` is replaced
minworld = expected.primary_world
- maxworld = expected.deleted_world
@assert minworld ≤ world
- if maxworld < world
- maxworld = 0
- end
- else
- minworld = 1
maxworld = typemax(UInt)
- mt = Base.get_methodtable(expected)
+ else
+ mt = get_methodtable(expected)
if mt === nothing
+ minworld = 1
maxworld = 0
else
- matched, valid_worlds = Base.Compiler._findsup(invokesig, mt, world)
+ matched, valid_worlds = Compiler._findsup(invokesig, mt, world)
minworld, maxworld = valid_worlds.min_world, valid_worlds.max_world
if matched === nothing
maxworld = 0
- elseif matched.method != expected
- maxworld = 0
+ else
+ matched = Any[matched.method]
+ if matched[] !== expected
+ maxworld = 0
+ end
end
end
end
- return minworld, maxworld
+ return minworld, maxworld, matched
end
end # module StaticData
diff --git a/base/stream.jl b/base/stream.jl
index e81f65685df72..5732a62c2153b 100644
--- a/base/stream.jl
+++ b/base/stream.jl
@@ -615,9 +615,9 @@ end
## BUFFER ##
## Allocate space in buffer (for immediate use)
function alloc_request(buffer::IOBuffer, recommended_size::UInt)
- ensureroom(buffer, Int(recommended_size))
+ ensureroom(buffer, recommended_size)
ptr = buffer.append ? buffer.size + 1 : buffer.ptr
- nb = min(length(buffer.data)-buffer.offset, buffer.maxsize) + buffer.offset - ptr + 1
+ nb = min(length(buffer.data), buffer.maxsize + get_offset(buffer)) - ptr + 1
return (Ptr{Cvoid}(pointer(buffer.data, ptr)), nb)
end
@@ -942,8 +942,7 @@ function readbytes!(s::LibuvStream, a::Vector{UInt8}, nb::Int)
nread = readbytes!(sbuf, a, nb)
else
initsize = length(a)
- newbuf = PipeBuffer(a, maxsize=nb)
- newbuf.size = newbuf.offset # reset the write pointer to the beginning
+ newbuf = _truncated_pipebuffer(a; maxsize=nb)
nread = try
s.buffer = newbuf
write(newbuf, sbuf)
@@ -990,8 +989,7 @@ function unsafe_read(s::LibuvStream, p::Ptr{UInt8}, nb::UInt)
if bytesavailable(sbuf) >= nb
unsafe_read(sbuf, p, nb)
else
- newbuf = PipeBuffer(unsafe_wrap(Array, p, nb), maxsize=Int(nb))
- newbuf.size = newbuf.offset # reset the write pointer to the beginning
+ newbuf = _truncated_pipebuffer(unsafe_wrap(Array, p, nb); maxsize=Int(nb))
try
s.buffer = newbuf
write(newbuf, sbuf)
@@ -1559,6 +1557,63 @@ function wait_readnb(s::BufferStream, nb::Int)
end
end
+function readavailable(this::BufferStream)
+ bytes = lock(this.cond) do
+ wait_readnb(this, 1)
+ buf = this.buffer
+ @assert buf.seekable == false
+ take!(buf)
+ end
+ return bytes
+end
+
+function read(stream::BufferStream)
+ bytes = lock(stream.cond) do
+ wait_close(stream)
+ take!(stream.buffer)
+ end
+ return bytes
+end
+
+function readbytes!(s::BufferStream, a::Vector{UInt8}, nb::Int)
+ sbuf = s.buffer
+ @assert sbuf.seekable == false
+ @assert sbuf.maxsize >= nb
+
+ function wait_locked(s, buf, nb)
+ while bytesavailable(buf) < nb
+ s.readerror === nothing || throw(s.readerror)
+ isopen(s) || break
+ s.status != StatusEOF || break
+ wait_readnb(s, nb)
+ end
+ end
+
+ bytes = lock(s.cond) do
+ if nb <= SZ_UNBUFFERED_IO # Under this limit we are OK with copying the array from the stream's buffer
+ wait_locked(s, sbuf, nb)
+ end
+ if bytesavailable(sbuf) >= nb
+ nread = readbytes!(sbuf, a, nb)
+ else
+ initsize = length(a)
+ newbuf = _truncated_pipebuffer(a; maxsize=nb)
+ nread = try
+ s.buffer = newbuf
+ write(newbuf, sbuf)
+ wait_locked(s, newbuf, nb)
+ bytesavailable(newbuf)
+ finally
+ s.buffer = sbuf
+ end
+ _take!(a, _unsafe_take!(newbuf))
+ length(a) >= initsize || resize!(a, initsize)
+ end
+ return nread
+ end
+ return bytes
+end
+
show(io::IO, s::BufferStream) = print(io, "BufferStream(bytes waiting=", bytesavailable(s.buffer), ", isopen=", isopen(s), ")")
function readuntil(s::BufferStream, c::UInt8; keep::Bool=false)
diff --git a/base/strings/annotated.jl b/base/strings/annotated.jl
index 814ee2afa9d55..1fbbdc1dc44e9 100644
--- a/base/strings/annotated.jl
+++ b/base/strings/annotated.jl
@@ -147,11 +147,11 @@ promote_rule(::Type{<:AnnotatedString}, ::Type{<:AbstractString}) = AnnotatedStr
## AbstractString interface ##
-ncodeunits(s::AnnotatedString) = ncodeunits(s.string)
+ncodeunits(s::AnnotatedString) = ncodeunits(s.string)::Int
codeunits(s::AnnotatedString) = codeunits(s.string)
codeunit(s::AnnotatedString) = codeunit(s.string)
codeunit(s::AnnotatedString, i::Integer) = codeunit(s.string, i)
-isvalid(s::AnnotatedString, i::Integer) = isvalid(s.string, i)
+isvalid(s::AnnotatedString, i::Integer) = isvalid(s.string, i)::Bool
@propagate_inbounds iterate(s::AnnotatedString, i::Integer=firstindex(s)) =
if i <= lastindex(s.string); (s[i], nextind(s, i)) end
eltype(::Type{<:AnnotatedString{S}}) where {S} = AnnotatedChar{eltype(S)}
@@ -460,201 +460,109 @@ function annotated_chartransform(f::Function, str::AnnotatedString, state=nothin
AnnotatedString(String(take!(outstr)), annots)
end
-## AnnotatedIOBuffer
-
-struct AnnotatedIOBuffer <: AbstractPipe
- io::IOBuffer
- annotations::Vector{RegionAnnotation}
-end
-
-AnnotatedIOBuffer(io::IOBuffer) = AnnotatedIOBuffer(io, Vector{RegionAnnotation}())
-AnnotatedIOBuffer() = AnnotatedIOBuffer(IOBuffer())
-
-function show(io::IO, aio::AnnotatedIOBuffer)
- show(io, AnnotatedIOBuffer)
- size = filesize(aio.io)
- print(io, '(', size, " byte", ifelse(size == 1, "", "s"), ", ",
- length(aio.annotations), " annotation", ifelse(length(aio.annotations) == 1, "", "s"), ")")
+struct RegionIterator{S <: AbstractString}
+ str::S
+ regions::Vector{UnitRange{Int}}
+ annotations::Vector{Vector{Annotation}}
end
-pipe_reader(io::AnnotatedIOBuffer) = io.io
-pipe_writer(io::AnnotatedIOBuffer) = io.io
-
-# Useful `IOBuffer` methods that we don't get from `AbstractPipe`
-position(io::AnnotatedIOBuffer) = position(io.io)
-seek(io::AnnotatedIOBuffer, n::Integer) = (seek(io.io, n); io)
-seekend(io::AnnotatedIOBuffer) = (seekend(io.io); io)
-skip(io::AnnotatedIOBuffer, n::Integer) = (skip(io.io, n); io)
-copy(io::AnnotatedIOBuffer) = AnnotatedIOBuffer(copy(io.io), copy(io.annotations))
-
-annotations(io::AnnotatedIOBuffer) = io.annotations
-
-annotate!(io::AnnotatedIOBuffer, range::UnitRange{Int}, label::Symbol, @nospecialize(val::Any)) =
- (_annotate!(io.annotations, range, label, val); io)
-
-function write(io::AnnotatedIOBuffer, astr::Union{AnnotatedString, SubString{<:AnnotatedString}})
- astr = AnnotatedString(astr)
- offset = position(io.io)
- eof(io) || _clear_annotations_in_region!(io.annotations, offset+1:offset+ncodeunits(astr))
- _insert_annotations!(io, astr.annotations)
- write(io.io, String(astr))
-end
+Base.length(si::RegionIterator) = length(si.regions)
-write(io::AnnotatedIOBuffer, c::AnnotatedChar) =
- write(io, AnnotatedString(string(c), [(region=1:ncodeunits(c), a...) for a in c.annotations]))
-write(io::AnnotatedIOBuffer, x::AbstractString) = write(io.io, x)
-write(io::AnnotatedIOBuffer, s::Union{SubString{String}, String}) = write(io.io, s)
-write(io::AnnotatedIOBuffer, b::UInt8) = write(io.io, b)
-
-function write(dest::AnnotatedIOBuffer, src::AnnotatedIOBuffer)
- destpos = position(dest)
- isappending = eof(dest)
- srcpos = position(src)
- nb = write(dest.io, src.io)
- isappending || _clear_annotations_in_region!(dest.annotations, destpos:destpos+nb)
- srcannots = [setindex(annot, max(1 + srcpos, first(annot.region)):last(annot.region), :region)
- for annot in src.annotations if first(annot.region) >= srcpos]
- _insert_annotations!(dest, srcannots, destpos - srcpos)
- nb
+Base.@propagate_inbounds function Base.iterate(si::RegionIterator, i::Integer=1)
+ if i <= length(si.regions)
+ @inbounds ((SubString(si.str, si.regions[i]), si.annotations[i]), i+1)
+ end
end
-# So that read/writes with `IOContext` (and any similar `AbstractPipe` wrappers)
-# work as expected.
-function write(io::AbstractPipe, s::Union{AnnotatedString, SubString{<:AnnotatedString}})
- if pipe_writer(io) isa AnnotatedIOBuffer
- write(pipe_writer(io), s)
- else
- invoke(write, Tuple{IO, typeof(s)}, io, s)
- end::Int
-end
-# Can't be part of the `Union` above because it introduces method ambiguities
-function write(io::AbstractPipe, c::AnnotatedChar)
- if pipe_writer(io) isa AnnotatedIOBuffer
- write(pipe_writer(io), c)
- else
- invoke(write, Tuple{IO, typeof(c)}, io, c)
- end::Int
-end
+Base.eltype(::RegionIterator{S}) where { S <: AbstractString} =
+ Tuple{SubString{S}, Vector{Annotation}}
"""
- _clear_annotations_in_region!(annotations::Vector{$RegionAnnotation}, span::UnitRange{Int})
+ eachregion(s::AnnotatedString{S})
+ eachregion(s::SubString{AnnotatedString{S}})
-Erase the presence of `annotations` within a certain `span`.
+Identify the contiguous substrings of `s` with constant annotations, and return
+an iterator which provides each substring and the applicable annotations as a
+`Tuple{SubString{S}, Vector{$Annotation}}`.
-This operates by removing all elements of `annotations` that are entirely
-contained in `span`, truncating ranges that partially overlap, and splitting
-annotations that subsume `span` to just exist either side of `span`.
+# Examples
+
+```jldoctest; setup=:(using Base: AnnotatedString, eachregion)
+julia> collect(eachregion(AnnotatedString(
+ "hey there", [(1:3, :face, :bold),
+ (5:9, :face, :italic)])))
+3-element Vector{Tuple{SubString{String}, Vector{$Annotation}}}:
+ ("hey", [$Annotation((:face, :bold))])
+ (" ", [])
+ ("there", [$Annotation((:face, :italic))])
+```
"""
-function _clear_annotations_in_region!(annotations::Vector{RegionAnnotation}, span::UnitRange{Int})
- # Clear out any overlapping pre-existing annotations.
- filter!(ann -> first(ann.region) < first(span) || last(ann.region) > last(span), annotations)
- extras = Tuple{Int, RegionAnnotation}[]
- for i in eachindex(annotations)
- annot = annotations[i]
- region = annot.region
- # Test for partial overlap
- if first(region) <= first(span) <= last(region) || first(region) <= last(span) <= last(region)
- annotations[i] =
- setindex(annot,
- if first(region) < first(span)
- first(region):first(span)-1
- else
- last(span)+1:last(region)
- end,
- :region)
- # If `span` fits exactly within `region`, then we've only copied over
- # the beginning overhang, but also need to conserve the end overhang.
- if first(region) < first(span) && last(span) < last(region)
- push!(extras, (i, setindex(annot, last(span)+1:last(region), :region)))
- end
+function eachregion(s::AnnotatedString, subregion::UnitRange{Int}=firstindex(s):lastindex(s))
+ isempty(s) || isempty(subregion) &&
+ return RegionIterator(s.string, UnitRange{Int}[], Vector{Annotation}[])
+ events = annotation_events(s, subregion)
+ isempty(events) && return RegionIterator(s.string, [subregion], [Annotation[]])
+ annotvals = Annotation[
+ (; label, value) for (; label, value) in annotations(s)]
+ regions = Vector{UnitRange{Int}}()
+ annots = Vector{Vector{Annotation}}()
+ pos = first(events).pos
+ if pos > first(subregion)
+ push!(regions, thisind(s, first(subregion)):prevind(s, pos))
+ push!(annots, [])
+ end
+ activelist = Int[]
+ for event in events
+ if event.pos != pos
+ push!(regions, pos:prevind(s, event.pos))
+ push!(annots, annotvals[activelist])
+ pos = event.pos
+ end
+ if event.active
+ insert!(activelist, searchsortedfirst(activelist, event.index), event.index)
+ else
+ deleteat!(activelist, searchsortedfirst(activelist, event.index))
end
end
- # Insert any extra entries in the appropriate position
- for (offset, (i, entry)) in enumerate(extras)
- insert!(annotations, i + offset, entry)
+ if last(events).pos < nextind(s, last(subregion))
+ push!(regions, last(events).pos:thisind(s, last(subregion)))
+ push!(annots, [])
end
- annotations
+ RegionIterator(s.string, regions, annots)
end
-"""
- _insert_annotations!(io::AnnotatedIOBuffer, annotations::Vector{$RegionAnnotation}, offset::Int = position(io))
+function eachregion(s::SubString{<:AnnotatedString}, pos::UnitRange{Int}=firstindex(s):lastindex(s))
+ if isempty(s)
+ RegionIterator(s.string, Vector{UnitRange{Int}}(), Vector{Vector{Annotation}}())
+ else
+ eachregion(s.string, first(pos)+s.offset:last(pos)+s.offset)
+ end
+end
-Register new `annotations` in `io`, applying an `offset` to their regions.
+"""
+ annotation_events(string::AbstractString, annots::Vector{$RegionAnnotation}, subregion::UnitRange{Int})
+ annotation_events(string::AnnotatedString, subregion::UnitRange{Int})
-The largely consists of simply shifting the regions of `annotations` by `offset`
-and pushing them onto `io`'s annotations. However, when it is possible to merge
-the new annotations with recent annotations in accordance with the semantics
-outlined in [`AnnotatedString`](@ref), we do so. More specifically, when there
-is a run of the most recent annotations that are also present as the first
-`annotations`, with the same value and adjacent regions, the new annotations are
-merged into the existing recent annotations by simply extending their range.
+Find all annotation "change events" that occur within a `subregion` of `annots`,
+with respect to `string`. When `string` is styled, `annots` is inferred.
-This is implemented so that one can say write an `AnnotatedString` to an
-`AnnotatedIOBuffer` one character at a time without needlessly producing a
-new annotation for each character.
+Each change event is given in the form of a `@NamedTuple{pos::Int, active::Bool,
+index::Int}` where `pos` is the position of the event, `active` is a boolean
+indicating whether the annotation is being activated or deactivated, and `index`
+is the index of the annotation in question.
"""
-function _insert_annotations!(io::AnnotatedIOBuffer, annotations::Vector{RegionAnnotation}, offset::Int = position(io))
- run = 0
- if !isempty(io.annotations) && last(last(io.annotations).region) == offset
- for i in reverse(axes(annotations, 1))
- annot = annotations[i]
- first(annot.region) == 1 || continue
- i <= length(io.annotations) || continue
- if annot.label == last(io.annotations).label && annot.value == last(io.annotations).value
- valid_run = true
- for runlen in 1:i
- new = annotations[begin+runlen-1]
- old = io.annotations[end-i+runlen]
- if last(old.region) != offset || first(new.region) != 1 || old.label != new.label || old.value != new.value
- valid_run = false
- break
- end
- end
- if valid_run
- run = i
- break
- end
- end
+function annotation_events(s::AbstractString, annots::Vector{RegionAnnotation}, subregion::UnitRange{Int})
+ events = Vector{NamedTuple{(:pos, :active, :index), Tuple{Int, Bool, Int}}}() # Position, Active?, Annotation index
+ for (i, (; region)) in enumerate(annots)
+ if !isempty(intersect(subregion, region))
+ start, stop = max(first(subregion), first(region)), min(last(subregion), last(region))
+ start <= stop || continue # Currently can't handle empty regions
+ push!(events, (pos=thisind(s, start), active=true, index=i))
+ push!(events, (pos=nextind(s, stop), active=false, index=i))
end
end
- for runindex in 0:run-1
- old_index = lastindex(io.annotations) - run + 1 + runindex
- old = io.annotations[old_index]
- new = annotations[begin+runindex]
- io.annotations[old_index] = setindex(old, first(old.region):last(new.region)+offset, :region)
- end
- for index in run+1:lastindex(annotations)
- annot = annotations[index]
- start, stop = first(annot.region), last(annot.region)
- push!(io.annotations, setindex(annotations[index], start+offset:stop+offset, :region))
- end
+ sort(events, by=e -> e.pos)
end
-function read(io::AnnotatedIOBuffer, ::Type{AnnotatedString{T}}) where {T <: AbstractString}
- if (start = position(io)) == 0
- AnnotatedString(read(io.io, T), copy(io.annotations))
- else
- annots = [setindex(annot, UnitRange{Int}(max(1, first(annot.region) - start), last(annot.region)-start), :region)
- for annot in io.annotations if last(annot.region) > start]
- AnnotatedString(read(io.io, T), annots)
- end
-end
-read(io::AnnotatedIOBuffer, ::Type{AnnotatedString{AbstractString}}) = read(io, AnnotatedString{String})
-read(io::AnnotatedIOBuffer, ::Type{AnnotatedString}) = read(io, AnnotatedString{String})
-
-function read(io::AnnotatedIOBuffer, ::Type{AnnotatedChar{T}}) where {T <: AbstractChar}
- pos = position(io)
- char = read(io.io, T)
- annots = [NamedTuple{(:label, :value)}(annot) for annot in io.annotations if pos+1 in annot.region]
- AnnotatedChar(char, annots)
-end
-read(io::AnnotatedIOBuffer, ::Type{AnnotatedChar{AbstractChar}}) = read(io, AnnotatedChar{Char})
-read(io::AnnotatedIOBuffer, ::Type{AnnotatedChar}) = read(io, AnnotatedChar{Char})
-
-function truncate(io::AnnotatedIOBuffer, size::Integer)
- truncate(io.io, size)
- filter!(ann -> first(ann.region) <= size, io.annotations)
- map!(ann -> setindex(ann, first(ann.region):min(size, last(ann.region)), :region),
- io.annotations, io.annotations)
- io
-end
+annotation_events(s::AnnotatedString, subregion::UnitRange{Int}) =
+ annotation_events(s.string, annotations(s), subregion)
diff --git a/base/strings/annotated_io.jl b/base/strings/annotated_io.jl
new file mode 100644
index 0000000000000..9698fd5909b68
--- /dev/null
+++ b/base/strings/annotated_io.jl
@@ -0,0 +1,276 @@
+# This file is a part of Julia. License is MIT: https://julialang.org/license
+
+## AnnotatedIOBuffer
+
+struct AnnotatedIOBuffer <: AbstractPipe
+ io::IOBuffer
+ annotations::Vector{RegionAnnotation}
+end
+
+AnnotatedIOBuffer(io::IOBuffer) = AnnotatedIOBuffer(io, Vector{RegionAnnotation}())
+AnnotatedIOBuffer() = AnnotatedIOBuffer(IOBuffer())
+
+function show(io::IO, aio::AnnotatedIOBuffer)
+ show(io, AnnotatedIOBuffer)
+ size = filesize(aio.io)
+ print(io, '(', size, " byte", ifelse(size == 1, "", "s"), ", ",
+ length(aio.annotations), " annotation", ifelse(length(aio.annotations) == 1, "", "s"), ")")
+end
+
+pipe_reader(io::AnnotatedIOBuffer) = io.io
+pipe_writer(io::AnnotatedIOBuffer) = io.io
+
+# Useful `IOBuffer` methods that we don't get from `AbstractPipe`
+position(io::AnnotatedIOBuffer) = position(io.io)
+seek(io::AnnotatedIOBuffer, n::Integer) = (seek(io.io, n); io)
+seekend(io::AnnotatedIOBuffer) = (seekend(io.io); io)
+skip(io::AnnotatedIOBuffer, n::Integer) = (skip(io.io, n); io)
+copy(io::AnnotatedIOBuffer) = AnnotatedIOBuffer(copy(io.io), copy(io.annotations))
+
+annotations(io::AnnotatedIOBuffer) = io.annotations
+
+annotate!(io::AnnotatedIOBuffer, range::UnitRange{Int}, label::Symbol, @nospecialize(val::Any)) =
+ (_annotate!(io.annotations, range, label, val); io)
+
+function write(io::AnnotatedIOBuffer, astr::Union{AnnotatedString, SubString{<:AnnotatedString}})
+ astr = AnnotatedString(astr)
+ offset = position(io.io)
+ eof(io) || _clear_annotations_in_region!(io.annotations, offset+1:offset+ncodeunits(astr))
+ _insert_annotations!(io, astr.annotations)
+ write(io.io, String(astr))
+end
+
+write(io::AnnotatedIOBuffer, c::AnnotatedChar) =
+ write(io, AnnotatedString(string(c), [(region=1:ncodeunits(c), a...) for a in c.annotations]))
+write(io::AnnotatedIOBuffer, x::AbstractString) = write(io.io, x)
+write(io::AnnotatedIOBuffer, s::Union{SubString{String}, String}) = write(io.io, s)
+write(io::AnnotatedIOBuffer, b::UInt8) = write(io.io, b)
+
+function write(dest::AnnotatedIOBuffer, src::AnnotatedIOBuffer)
+ destpos = position(dest)
+ isappending = eof(dest)
+ srcpos = position(src)
+ nb = write(dest.io, src.io)
+ isappending || _clear_annotations_in_region!(dest.annotations, destpos:destpos+nb)
+ srcannots = [setindex(annot, max(1 + srcpos, first(annot.region)):last(annot.region), :region)
+ for annot in src.annotations if first(annot.region) >= srcpos]
+ _insert_annotations!(dest, srcannots, destpos - srcpos)
+ nb
+end
+
+# So that read/writes with `IOContext` (and any similar `AbstractPipe` wrappers)
+# work as expected.
+function write(io::AbstractPipe, s::Union{AnnotatedString, SubString{<:AnnotatedString}})
+ if pipe_writer(io) isa AnnotatedIOBuffer
+ write(pipe_writer(io), s)
+ else
+ invoke(write, Tuple{IO, typeof(s)}, io, s)
+ end::Int
+end
+
+# Can't be part of the `Union` above because it introduces method ambiguities
+function write(io::AbstractPipe, c::AnnotatedChar)
+ if pipe_writer(io) isa AnnotatedIOBuffer
+ write(pipe_writer(io), c)
+ else
+ invoke(write, Tuple{IO, typeof(c)}, io, c)
+ end::Int
+end
+
+function read(io::AnnotatedIOBuffer, ::Type{AnnotatedString{T}}) where {T <: AbstractString}
+ if (start = position(io)) == 0
+ AnnotatedString(read(io.io, T), copy(io.annotations))
+ else
+ annots = [setindex(annot, UnitRange{Int}(max(1, first(annot.region) - start), last(annot.region)-start), :region)
+ for annot in io.annotations if last(annot.region) > start]
+ AnnotatedString(read(io.io, T), annots)
+ end
+end
+read(io::AnnotatedIOBuffer, ::Type{AnnotatedString{AbstractString}}) = read(io, AnnotatedString{String})
+read(io::AnnotatedIOBuffer, ::Type{AnnotatedString}) = read(io, AnnotatedString{String})
+
+function read(io::AnnotatedIOBuffer, ::Type{AnnotatedChar{T}}) where {T <: AbstractChar}
+ pos = position(io)
+ char = read(io.io, T)
+ annots = [NamedTuple{(:label, :value)}(annot) for annot in io.annotations if pos+1 in annot.region]
+ AnnotatedChar(char, annots)
+end
+read(io::AnnotatedIOBuffer, ::Type{AnnotatedChar{AbstractChar}}) = read(io, AnnotatedChar{Char})
+read(io::AnnotatedIOBuffer, ::Type{AnnotatedChar}) = read(io, AnnotatedChar{Char})
+
+function truncate(io::AnnotatedIOBuffer, size::Integer)
+ truncate(io.io, size)
+ filter!(ann -> first(ann.region) <= size, io.annotations)
+ map!(ann -> setindex(ann, first(ann.region):min(size, last(ann.region)), :region),
+ io.annotations, io.annotations)
+ io
+end
+
+"""
+ _clear_annotations_in_region!(annotations::Vector{$RegionAnnotation}, span::UnitRange{Int})
+
+Erase the presence of `annotations` within a certain `span`.
+
+This operates by removing all elements of `annotations` that are entirely
+contained in `span`, truncating ranges that partially overlap, and splitting
+annotations that subsume `span` to just exist either side of `span`.
+"""
+function _clear_annotations_in_region!(annotations::Vector{RegionAnnotation}, span::UnitRange{Int})
+ # Clear out any overlapping pre-existing annotations.
+ filter!(ann -> first(ann.region) < first(span) || last(ann.region) > last(span), annotations)
+ extras = Tuple{Int, RegionAnnotation}[]
+ for i in eachindex(annotations)
+ annot = annotations[i]
+ region = annot.region
+ # Test for partial overlap
+ if first(region) <= first(span) <= last(region) || first(region) <= last(span) <= last(region)
+ annotations[i] =
+ setindex(annot,
+ if first(region) < first(span)
+ first(region):first(span)-1
+ else
+ last(span)+1:last(region)
+ end,
+ :region)
+ # If `span` fits exactly within `region`, then we've only copied over
+ # the beginning overhang, but also need to conserve the end overhang.
+ if first(region) < first(span) && last(span) < last(region)
+ push!(extras, (i, setindex(annot, last(span)+1:last(region), :region)))
+ end
+ end
+ end
+ # Insert any extra entries in the appropriate position
+ for (offset, (i, entry)) in enumerate(extras)
+ insert!(annotations, i + offset, entry)
+ end
+ annotations
+end
+
+"""
+ _insert_annotations!(io::AnnotatedIOBuffer, annotations::Vector{$RegionAnnotation}, offset::Int = position(io))
+
+Register new `annotations` in `io`, applying an `offset` to their regions.
+
+This largely consists of simply shifting the regions of `annotations` by `offset`
+and pushing them onto `io`'s annotations. However, when it is possible to merge
+the new annotations with recent annotations in accordance with the semantics
+outlined in [`AnnotatedString`](@ref), we do so. More specifically, when there
+is a run of the most recent annotations that are also present as the first
+`annotations`, with the same value and adjacent regions, the new annotations are
+merged into the existing recent annotations by simply extending their range.
+
+This is implemented so that one can, say, write an `AnnotatedString` to an
+`AnnotatedIOBuffer` one character at a time without needlessly producing a
+new annotation for each character.
+"""
+function _insert_annotations!(io::AnnotatedIOBuffer, annotations::Vector{RegionAnnotation}, offset::Int = position(io))
+ run = 0
+ if !isempty(io.annotations) && last(last(io.annotations).region) == offset
+ for i in reverse(axes(annotations, 1))
+ annot = annotations[i]
+ first(annot.region) == 1 || continue
+ i <= length(io.annotations) || continue
+ if annot.label == last(io.annotations).label && annot.value == last(io.annotations).value
+ valid_run = true
+ for runlen in 1:i
+ new = annotations[begin+runlen-1]
+ old = io.annotations[end-i+runlen]
+ if last(old.region) != offset || first(new.region) != 1 || old.label != new.label || old.value != new.value
+ valid_run = false
+ break
+ end
+ end
+ if valid_run
+ run = i
+ break
+ end
+ end
+ end
+ end
+ for runindex in 0:run-1
+ old_index = lastindex(io.annotations) - run + 1 + runindex
+ old = io.annotations[old_index]
+ new = annotations[begin+runindex]
+ io.annotations[old_index] = setindex(old, first(old.region):last(new.region)+offset, :region)
+ end
+ for index in run+1:lastindex(annotations)
+ annot = annotations[index]
+ start, stop = first(annot.region), last(annot.region)
+ push!(io.annotations, setindex(annotations[index], start+offset:stop+offset, :region))
+ end
+end
+
+# NOTE: This is an interim solution to the invalidations caused
+# by the split styled display implementation. This should be
+# replaced by a more robust solution (such as a consolidation of
+# the type and method definitions) in the near future.
+module AnnotatedDisplay
+
+using ..Base: IO, SubString, AnnotatedString, AnnotatedChar, AnnotatedIOBuffer
+using ..Base: eachregion, invoke_in_world, tls_world_age
+
+# Write
+
+ansi_write(f::Function, io::IO, x::Any) = f(io, String(x))
+
+ansi_write_(f::Function, io::IO, @nospecialize(x::Any)) =
+ invoke_in_world(tls_world_age(), ansi_write, f, io, x)
+
+Base.write(io::IO, s::Union{<:AnnotatedString, SubString{<:AnnotatedString}}) =
+ ansi_write_(write, io, s)::Int
+
+Base.write(io::IO, c::AnnotatedChar) =
+ ansi_write_(write, io, c)::Int
+
+function Base.write(io::IO, aio::AnnotatedIOBuffer)
+ if get(io, :color, false) == true
+ # This does introduce an overhead that technically
+ # could be avoided, but I'm not sure that it's currently
+ # worth the effort to implement an efficient version of
+ # writing from an AnnotatedIOBuffer with style.
+ # In the meantime, by converting to an `AnnotatedString` we can just
+ # reuse all the work done to make that work.
+ ansi_write_(write, io, read(aio, AnnotatedString))::Int
+ else
+ write(io, aio.io)
+ end
+end
+
+# Print
+
+Base.print(io::IO, s::Union{<:AnnotatedString, SubString{<:AnnotatedString}}) =
+ (ansi_write_(write, io, s); nothing)
+
+Base.print(io::IO, s::AnnotatedChar) =
+ (ansi_write_(write, io, s); nothing)
+
+Base.print(io::AnnotatedIOBuffer, s::Union{<:AnnotatedString, SubString{<:AnnotatedString}}) =
+ (write(io, s); nothing)
+
+Base.print(io::AnnotatedIOBuffer, c::AnnotatedChar) =
+ (write(io, c); nothing)
+
+# Escape
+
+Base.escape_string(io::IO, s::Union{<:AnnotatedString, SubString{<:AnnotatedString}},
+ esc = ""; keep = (), ascii::Bool=false, fullhex::Bool=false) =
+ (ansi_write_((io, s) -> escape_string(io, s, esc; keep, ascii, fullhex), io, s); nothing)
+
+# Show
+
+show_annot(io::IO, ::Any) = nothing
+show_annot(io::IO, ::MIME, ::Any) = nothing
+
+show_annot_(io::IO, @nospecialize(x::Any)) =
+ invoke_in_world(tls_world_age(), show_annot, io, x)::Nothing
+
+show_annot_(io::IO, m::MIME, @nospecialize(x::Any)) =
+ invoke_in_world(tls_world_age(), show_annot, io, m, x)::Nothing
+
+Base.show(io::IO, m::MIME"text/html", s::Union{<:AnnotatedString, SubString{<:AnnotatedString}}) =
+ show_annot_(io, m, s)
+
+Base.show(io::IO, m::MIME"text/html", c::AnnotatedChar) =
+ show_annot_(io, m, c)
+
+end
diff --git a/base/strings/basic.jl b/base/strings/basic.jl
index bf11199143c1e..c40deb0656ced 100644
--- a/base/strings/basic.jl
+++ b/base/strings/basic.jl
@@ -1,5 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
+import Core: Symbol
+
"""
The `AbstractString` type is the supertype of all string implementations in
Julia. Strings are encodings of sequences of [Unicode](https://unicode.org/)
@@ -511,11 +513,11 @@ prevind(s::AbstractString, i::Int) = prevind(s, i, 1)
function prevind(s::AbstractString, i::Int, n::Int)
n < 0 && throw(ArgumentError("n cannot be negative: $n"))
- z = ncodeunits(s) + 1
+ z = ncodeunits(s)::Int + 1
@boundscheck 0 < i ≤ z || throw(BoundsError(s, i))
- n == 0 && return thisind(s, i) == i ? i : string_index_err(s, i)
+ n == 0 && return thisind(s, i)::Int == i ? i : string_index_err(s, i)
while n > 0 && 1 < i
- @inbounds n -= isvalid(s, i -= 1)
+ @inbounds n -= isvalid(s, i -= 1)::Bool
end
return i - n
end
@@ -570,11 +572,11 @@ nextind(s::AbstractString, i::Int) = nextind(s, i, 1)
function nextind(s::AbstractString, i::Int, n::Int)
n < 0 && throw(ArgumentError("n cannot be negative: $n"))
- z = ncodeunits(s)
+ z = ncodeunits(s)::Int
@boundscheck 0 ≤ i ≤ z || throw(BoundsError(s, i))
- n == 0 && return thisind(s, i) == i ? i : string_index_err(s, i)
+ n == 0 && return thisind(s, i)::Int == i ? i : string_index_err(s, i)
while n > 0 && i < z
- @inbounds n -= isvalid(s, i += 1)
+ @inbounds n -= isvalid(s, i += 1)::Bool
end
return i + n
end
diff --git a/base/strings/io.jl b/base/strings/io.jl
index b4a3c7ad3e0c2..b27a6049f0b0e 100644
--- a/base/strings/io.jl
+++ b/base/strings/io.jl
@@ -589,7 +589,7 @@ julia> v[2]
0x32
```
"""
-macro b_str(s)
+macro b_str(s::String)
v = codeunits(unescape_string(s))
QuoteNode(v)
end
diff --git a/base/strings/string.jl b/base/strings/string.jl
index 9f3c3d00e4b81..79ec12d11cb94 100644
--- a/base/strings/string.jl
+++ b/base/strings/string.jl
@@ -107,7 +107,7 @@ end
# but the macro is not available at this time in bootstrap, so we write it manually.
const _string_n_override = 0x04ee
@eval _string_n(n::Integer) = $(Expr(:foreigncall, QuoteNode(:jl_alloc_string), Ref{String},
- :(Core.svec(Csize_t)), 1, QuoteNode((:ccall, _string_n_override)), :(convert(Csize_t, n))))
+ :(Core.svec(Csize_t)), 1, QuoteNode((:ccall, _string_n_override, false)), :(convert(Csize_t, n))))
"""
String(s::AbstractString)
diff --git a/base/strings/strings.jl b/base/strings/strings.jl
index 8dae311f475b4..32975b6ea3fc7 100644
--- a/base/strings/strings.jl
+++ b/base/strings/strings.jl
@@ -11,3 +11,4 @@ import .Iterators: PartitionIterator
include("strings/util.jl")
include("strings/io.jl")
+include("strings/annotated_io.jl")
diff --git a/base/summarysize.jl b/base/summarysize.jl
index 4f2646c7641b7..62b0ad0849778 100644
--- a/base/summarysize.jl
+++ b/base/summarysize.jl
@@ -149,13 +149,8 @@ function (ss::SummarySize)(obj::GenericMemory)
datakey = unsafe_convert(Ptr{Cvoid}, obj)
if !haskey(ss.seen, datakey)
ss.seen[datakey] = true
- dsize = sizeof(obj)
+ size += sizeof(obj)
T = eltype(obj)
- if isbitsunion(T)
- # add 1 union selector byte for each element
- dsize += length(obj)
- end
- size += dsize
if !isempty(obj) && T !== Symbol && (!Base.allocatedinline(T) || (T isa DataType && !Base.datatype_pointerfree(T)))
push!(ss.frontier_x, obj)
push!(ss.frontier_i, 1)
diff --git a/base/sysimg.jl b/base/sysimg.jl
index 42f54a849f157..8adb05ece0b2c 100644
--- a/base/sysimg.jl
+++ b/base/sysimg.jl
@@ -1,21 +1,12 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
-# Can be loaded on top of either an existing system image built from
-# `Base_compiler.jl` or standalone, in which case we will build it now.
-let had_compiler = isdefined(Main, :Base)
-if had_compiler; else
-include("Base_compiler.jl")
-end
-
-Core.include(Base, "Base.jl")
-
-had_compiler && ccall(:jl_init_restored_module, Cvoid, (Any,), Base)
-end
+Base.Core.include(Base, "Base.jl") # finish populating Base (currently just has the Compiler)
+# Set up Main module by importing from Base
using .Base
+using .Base.MainInclude # ans, err, and sometimes Out
-# Set up Main module
-using Base.MainInclude # ans, err, and sometimes Out
+ccall(:jl_init_restored_module, Cvoid, (Any,), Base)
# These definitions calls Base._include rather than Base.include to get
# one-frame stacktraces for the common case of using include(fname) in Main.
@@ -61,7 +52,7 @@ definition of `eval`, which evaluates expressions in that module.
const eval = Core.EvalInto(Main)
# Ensure this file is also tracked
-pushfirst!(Base._included_files, (@__MODULE__, abspath(@__FILE__)))
+pushfirst!(Base._included_files, (Main, abspath(@__FILE__)))
# set up depot & load paths to be able to find stdlib packages
Base.init_depot_path()
diff --git a/base/task.jl b/base/task.jl
index 951e980ee903c..e33a7e4efddf6 100644
--- a/base/task.jl
+++ b/base/task.jl
@@ -1145,6 +1145,16 @@ function throwto(t::Task, @nospecialize exc)
return try_yieldto(identity)
end
+@inline function wait_forever()
+ while true
+ wait()
+ end
+end
+
+const get_sched_task = OncePerThread{Task}() do
+ Task(wait_forever)
+end
+
function ensure_rescheduled(othertask::Task)
ct = current_task()
W = workqueue_for(Threads.threadid())
@@ -1181,25 +1191,39 @@ end
checktaskempty = Partr.multiq_check_empty
-@noinline function poptask(W::StickyWorkqueue)
- task = trypoptask(W)
- if !(task isa Task)
- task = ccall(:jl_task_get_next, Ref{Task}, (Any, Any, Any), trypoptask, W, checktaskempty)
- end
- set_next_task(task)
- nothing
-end
-
function wait()
ct = current_task()
# [task] user_time -yield-or-done-> wait_time
record_running_time!(ct)
+ # let GC run
GC.safepoint()
- W = workqueue_for(Threads.threadid())
- poptask(W)
- result = try_yieldto(ensure_rescheduled)
+ # check for libuv events
process_events()
- # return when we come out of the queue
+
+ # get the next task to run
+ result = nothing
+ have_result = false
+ W = workqueue_for(Threads.threadid())
+ task = trypoptask(W)
+ if !(task isa Task)
+ # No tasks to run; switch to the scheduler task to run the
+ # thread sleep logic.
+ sched_task = get_sched_task()
+ if ct !== sched_task
+ result = yieldto(sched_task)
+ have_result = true
+ else
+ task = ccall(:jl_task_get_next, Ref{Task}, (Any, Any, Any),
+ trypoptask, W, checktaskempty)
+ end
+ end
+ # We may have already switched tasks (via the scheduler task), so
+ # only switch if we haven't.
+ if !have_result
+ @assert task isa Task
+ set_next_task(task)
+ result = try_yieldto(ensure_rescheduled)
+ end
return result
end
diff --git a/base/timing.jl b/base/timing.jl
index 61fa73f2eff62..9e3a4cf128413 100644
--- a/base/timing.jl
+++ b/base/timing.jl
@@ -472,6 +472,35 @@ function gc_bytes()
b[]
end
+function allocated(f, args::Vararg{Any,N}) where {N}
+ b0 = Ref{Int64}(0)
+ b1 = Ref{Int64}(0)
+ Base.gc_bytes(b0)
+ f(args...)
+ Base.gc_bytes(b1)
+ return b1[] - b0[]
+end
+only(methods(allocated)).called = 0xff
+
+function allocations(f, args::Vararg{Any,N}) where {N}
+ stats = Base.gc_num()
+ f(args...)
+ diff = Base.GC_Diff(Base.gc_num(), stats)
+ return Base.gc_alloc_count(diff)
+end
+only(methods(allocations)).called = 0xff
+
+function is_simply_call(@nospecialize ex)
+ Meta.isexpr(ex, :call) || return false
+ for a in ex.args
+ a isa QuoteNode && continue
+ a isa Symbol && continue
+ Base.is_self_quoting(a) && continue
+ return false
+ end
+ return true
+end
+
"""
@allocated
@@ -487,15 +516,11 @@ julia> @allocated rand(10^6)
```
"""
macro allocated(ex)
- quote
- Experimental.@force_compile
- local b0 = Ref{Int64}(0)
- local b1 = Ref{Int64}(0)
- gc_bytes(b0)
- $(esc(ex))
- gc_bytes(b1)
- b1[] - b0[]
+ if !is_simply_call(ex)
+ ex = :((() -> $ex)())
end
+ pushfirst!(ex.args, GlobalRef(Base, :allocated))
+ return esc(ex)
end
"""
@@ -516,15 +541,14 @@ julia> @allocations rand(10^6)
This macro was added in Julia 1.9.
"""
macro allocations(ex)
- quote
- Experimental.@force_compile
- local stats = Base.gc_num()
- $(esc(ex))
- local diff = Base.GC_Diff(Base.gc_num(), stats)
- Base.gc_alloc_count(diff)
+ if !is_simply_call(ex)
+ ex = :((() -> $ex)())
end
+ pushfirst!(ex.args, GlobalRef(Base, :allocations))
+ return esc(ex)
end
+
"""
@lock_conflicts
@@ -643,33 +667,36 @@ end
# here so it's possible to time/trace all imports, including InteractiveUtils and its deps
macro time_imports(ex)
quote
- try
- Base.Threads.atomic_add!(Base.TIMING_IMPORTS, 1)
- $(esc(ex))
- finally
+ Base.Threads.atomic_add!(Base.TIMING_IMPORTS, 1)
+ @__tryfinally(
+ # try
+ $(esc(ex)),
+ # finally
Base.Threads.atomic_sub!(Base.TIMING_IMPORTS, 1)
- end
+ )
end
end
macro trace_compile(ex)
quote
- try
- ccall(:jl_force_trace_compile_timing_enable, Cvoid, ())
- $(esc(ex))
- finally
+ ccall(:jl_force_trace_compile_timing_enable, Cvoid, ())
+ @__tryfinally(
+ # try
+ $(esc(ex)),
+ # finally
ccall(:jl_force_trace_compile_timing_disable, Cvoid, ())
- end
+ )
end
end
macro trace_dispatch(ex)
quote
- try
- ccall(:jl_force_trace_dispatch_enable, Cvoid, ())
- $(esc(ex))
- finally
+ ccall(:jl_force_trace_dispatch_enable, Cvoid, ())
+ @__tryfinally(
+ # try
+ $(esc(ex)),
+ # finally
ccall(:jl_force_trace_dispatch_disable, Cvoid, ())
- end
+ )
end
end
diff --git a/base/tuple.jl b/base/tuple.jl
index ee3174d783531..2ff8a1185a007 100644
--- a/base/tuple.jl
+++ b/base/tuple.jl
@@ -1,5 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
+import Core: Tuple
+
# Document NTuple here where we have everything needed for the doc system
"""
NTuple{N, T}
diff --git a/cli/Makefile b/cli/Makefile
index 3cc0af1a76afd..8c73d76f5020f 100644
--- a/cli/Makefile
+++ b/cli/Makefile
@@ -28,8 +28,8 @@ LOADER_LDFLAGS += -Wl,--no-as-needed -lpthread -rdynamic -lc -Wl,--as-needed
endif
# Build list of dependent libraries that must be opened
-SHIPFLAGS += -DDEP_LIBS="\"$(LOADER_BUILD_DEP_LIBS)\""
-DEBUGFLAGS += -DDEP_LIBS="\"$(LOADER_DEBUG_BUILD_DEP_LIBS)\""
+SHIPFLAGS += -DDEP_LIBS=$(call shell_escape,$(call c_escape,$(LOADER_BUILD_DEP_LIBS)))
+DEBUGFLAGS += -DDEP_LIBS=$(call shell_escape,$(call c_escape,$(LOADER_DEBUG_BUILD_DEP_LIBS)))
ifneq (,$(findstring MINGW,$(shell uname)))
# In MSYS2, do not perform path conversion for `DEP_LIBS`.
# https://www.msys2.org/wiki/Porting/#filesystem-namespaces
diff --git a/cli/loader_lib.c b/cli/loader_lib.c
index af2a36cfce8ab..4d75cfd9563cb 100644
--- a/cli/loader_lib.c
+++ b/cli/loader_lib.c
@@ -349,10 +349,16 @@ static char *libstdcxxprobe(void)
pid_t npid = waitpid(pid, &wstatus, 0);
if (npid == -1) {
if (errno == EINTR) continue;
- if (errno != EINTR) {
- perror("Error during libstdcxxprobe in parent process:\nwaitpid");
- exit(1);
+ if (errno == ECHILD) {
+ // SIGCHLD is set to SIG_IGN or has flag SA_NOCLDWAIT, so the child
+ // did not become a zombie and wait for `waitpid` - it just exited.
+ //
+ // Assume that it exited successfully and use whatever libpath we
+ // got out of the pipe, if any.
+ break;
}
+ perror("Error during libstdcxxprobe in parent process:\nwaitpid");
+ exit(1);
}
else if (!WIFEXITED(wstatus)) {
const char *err_str = "Error during libstdcxxprobe in parent process:\n"
diff --git a/contrib/generate_precompile.jl b/contrib/generate_precompile.jl
index b075223d9c7e4..13ad25e620b02 100644
--- a/contrib/generate_precompile.jl
+++ b/contrib/generate_precompile.jl
@@ -1,7 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license
# Prevent this from putting anything into the Main namespace
-@eval Core.Module() begin
+@eval Base module __precompile_script
if Threads.maxthreadid() != 1
@warn "Running this file with multiple Julia threads may lead to a build error" Threads.maxthreadid()
@@ -46,7 +46,6 @@ precompile(Tuple{typeof(Base.Threads.atomic_sub!), Base.Threads.Atomic{Int}, Int
precompile(Tuple{Type{Base.Val{x} where x}, Module})
precompile(Tuple{Type{NamedTuple{(:honor_overrides,), T} where T<:Tuple}, Tuple{Bool}})
precompile(Tuple{typeof(Base.unique!), Array{String, 1}})
-precompile(Tuple{typeof(Base.invokelatest), Any})
precompile(Tuple{typeof(Base.vcat), Array{String, 1}, Array{String, 1}})
# Pkg loading
@@ -107,6 +106,9 @@ precompile(Base.CoreLogging.env_override_minlevel, (Symbol, Module))
precompile(Base.StackTraces.lookup, (Ptr{Nothing},))
precompile(Tuple{typeof(Base.run_module_init), Module, Int})
+# Presence tested in the tests
+precompile(Tuple{typeof(Base.print), Base.IOStream, String})
+
# precompilepkgs
precompile(Tuple{typeof(Base.get), Type{Array{String, 1}}, Base.Dict{String, Any}, String})
precompile(Tuple{typeof(Base.get), Type{Base.Dict{String, Any}}, Base.Dict{String, Any}, String})
@@ -352,10 +354,10 @@ generate_precompile_statements() = try # Make sure `ansi_enablecursor` is printe
PrecompileStagingArea = Module()
for (_pkgid, _mod) in Base.loaded_modules
if !(_pkgid.name in ("Main", "Core", "Base"))
- eval(PrecompileStagingArea, :(const $(Symbol(_mod)) = $_mod))
+ Core.eval(PrecompileStagingArea, :(const $(Symbol(_mod)) = $_mod))
end
end
- eval(PrecompileStagingArea, :(const Compiler = Base.Compiler))
+ Core.eval(PrecompileStagingArea, :(const Compiler = Base.Compiler))
n_succeeded = 0
# Make statements unique
diff --git a/contrib/juliac-buildscript.jl b/contrib/juliac-buildscript.jl
index c23b679272b1e..0549afc0e1508 100644
--- a/contrib/juliac-buildscript.jl
+++ b/contrib/juliac-buildscript.jl
@@ -17,6 +17,9 @@ task.rngState3 = 0x3a77f7189200c20b
task.rngState4 = 0x5502376d099035ae
uuid_tuple = (UInt64(0), UInt64(0))
ccall(:jl_set_module_uuid, Cvoid, (Any, NTuple{2, UInt64}), Base.__toplevel__, uuid_tuple)
+if Base.get_bool_env("JULIA_USE_FLISP_PARSER", false) === false
+ Base.JuliaSyntax.enable_in_core!()
+end
# Patch methods in Core and Base
@@ -35,17 +38,8 @@ end
set_active_project(projfile::Union{AbstractString,Nothing}) = ACTIVE_PROJECT[] = projfile
disable_library_threading() = nothing
start_profile_listener() = nothing
- @inline function invokelatest(f::F, args...; kwargs...) where F
- return f(args...; kwargs...)
- end
- @inline function invokelatest_gr(gr::GlobalRef, @nospecialize args...; kwargs...)
- @inline
- kwargs = merge(NamedTuple(), kwargs)
- if isempty(kwargs)
- return apply_gr(gr, args...)
- end
- return apply_gr_kw(kwargs, gr, args...)
- end
+ invokelatest_trimmed(f, args...; kwargs...) = f(args...; kwargs...)
+ const invokelatest = invokelatest_trimmed
function sprint(f::F, args::Vararg{Any,N}; context=nothing, sizehint::Integer=0) where {F<:Function,N}
s = IOBuffer(sizehint=sizehint)
if context isa Tuple
@@ -132,15 +126,8 @@ end
mapreduce_empty(::typeof(abs), op::F, T) where {F} = abs(reduce_empty(op, T))
mapreduce_empty(::typeof(abs2), op::F, T) where {F} = abs2(reduce_empty(op, T))
end
-@eval Base.Unicode begin
- function utf8proc_map(str::Union{String,SubString{String}}, options::Integer, chartransform::F = identity) where F
- nwords = utf8proc_decompose(str, options, C_NULL, 0, chartransform)
- buffer = Base.StringVector(nwords*4)
- nwords = utf8proc_decompose(str, options, buffer, nwords, chartransform)
- nbytes = ccall(:utf8proc_reencode, Int, (Ptr{UInt8}, Int, Cint), buffer, nwords, options)
- nbytes < 0 && utf8proc_error(nbytes)
- return String(resize!(buffer, nbytes))
- end
+@eval Base.Sys begin
+ __init_build() = nothing
end
@eval Base.GMP begin
function __init__()
@@ -202,6 +189,7 @@ let mod = Base.include(Base.__toplevel__, inputfile)
if !isa(mod, Module)
mod = Main
end
+ Core.@latestworld
if output_type == "--output-exe" && isdefined(mod, :main) && !add_ccallables
entrypoint(mod.main, ())
end
@@ -209,6 +197,7 @@ let mod = Base.include(Base.__toplevel__, inputfile)
#entrypoint(join, (Base.GenericIOBuffer{Memory{UInt8}}, Array{String, 1}, Char))
entrypoint(Base.task_done_hook, (Task,))
entrypoint(Base.wait, ())
+ entrypoint(Base.wait_forever, ())
entrypoint(Base.trypoptask, (Base.StickyWorkqueue,))
entrypoint(Base.checktaskempty, ())
if add_ccallables
@@ -217,10 +206,12 @@ let mod = Base.include(Base.__toplevel__, inputfile)
end
# Additional method patches depending on whether user code loads certain stdlibs
+let
+ find_loaded_root_module(key::Base.PkgId) = Base.maybe_root_module(key)
-let loaded = Symbol.(Base.loaded_modules_array()) # TODO better way to do this
- if :SparseArrays in loaded
- using SparseArrays
+ SparseArrays = find_loaded_root_module(Base.PkgId(
+ Base.UUID("2f01184e-e22b-5df5-ae63-d93ebab69eaf"), "SparseArrays"))
+ if SparseArrays !== nothing
@eval SparseArrays.CHOLMOD begin
function __init__()
ccall((:SuiteSparse_config_malloc_func_set, :libsuitesparseconfig),
@@ -234,10 +225,21 @@ let loaded = Symbol.(Base.loaded_modules_array()) # TODO better way to do this
end
end
end
- if :Artifacts in loaded
- using Artifacts
+
+ Artifacts = find_loaded_root_module(Base.PkgId(
+ Base.UUID("56f22d72-fd6d-98f1-02f0-08ddc0907c33"), "Artifacts"))
+ if Artifacts !== nothing
@eval Artifacts begin
- function _artifact_str(__module__, artifacts_toml, name, path_tail, artifact_dict, hash, platform, _::Val{lazyartifacts}) where lazyartifacts
+ function _artifact_str(
+ __module__,
+ artifacts_toml,
+ name,
+ path_tail,
+ artifact_dict,
+ hash,
+ platform,
+ _::Val{LazyArtifacts}
+ ) where LazyArtifacts
# If the artifact exists, we're in the happy path and we can immediately
# return the path to the artifact:
dirs = artifacts_dirs(bytes2hex(hash.bytes))
@@ -250,26 +252,34 @@ let loaded = Symbol.(Base.loaded_modules_array()) # TODO better way to do this
end
end
end
- if :Pkg in loaded
- using Pkg
+
+ Pkg = find_loaded_root_module(Base.PkgId(
+ Base.UUID("44cfe95a-1eb2-52ea-b672-e2afdf69b78f"), "Pkg"))
+ if Pkg !== nothing
@eval Pkg begin
__init__() = rand() #TODO, methods that do nothing don't get codegened
end
end
- if :StyledStrings in loaded
- using StyledStrings
+
+ StyledStrings = find_loaded_root_module(Base.PkgId(
+ Base.UUID("f489334b-da3d-4c2e-b8f0-e476e12c162b"), "StyledStrings"))
+ if StyledStrings !== nothing
@eval StyledStrings begin
__init__() = rand()
end
end
- if :Markdown in loaded
- using Markdown
+
+ Markdown = find_loaded_root_module(Base.PkgId(
+ Base.UUID("d6f4376e-aef5-505a-96c1-9c027394607a"), "Markdown"))
+ if Markdown !== nothing
@eval Markdown begin
__init__() = rand()
end
end
- if :JuliaSyntaxHighlighting in loaded
- using JuliaSyntaxHighlighting
+
+ JuliaSyntaxHighlighting = find_loaded_root_module(Base.PkgId(
+ Base.UUID("ac6e5ff7-fb65-4e79-a425-ec3bc9c03011"), "JuliaSyntaxHighlighting"))
+ if JuliaSyntaxHighlighting !== nothing
@eval JuliaSyntaxHighlighting begin
__init__() = rand()
end
diff --git a/contrib/juliac.jl b/contrib/juliac.jl
index 8b413fccd6231..b110f1d233690 100644
--- a/contrib/juliac.jl
+++ b/contrib/juliac.jl
@@ -6,6 +6,8 @@ module JuliaConfig
end
julia_cmd = `$(Base.julia_cmd()) --startup-file=no --history-file=no`
+cpu_target = get(ENV, "JULIA_CPU_TARGET", nothing)
+julia_cmd_target = `$(Base.julia_cmd(;cpu_target)) --startup-file=no --history-file=no`
output_type = nothing # exe, sharedlib, sysimage
outname = nothing
file = nothing
@@ -28,6 +30,7 @@ end
# arguments to forward to julia compilation process
julia_args = []
+enable_trim::Bool = false
let i = 1
while i <= length(ARGS)
@@ -44,9 +47,11 @@ let i = 1
global verbose = true
elseif arg == "--relative-rpath"
global relative_rpath = true
- elseif startswith(arg, "--trim") || arg == "--experimental"
- # forwarded args
- push!(julia_args, arg)
+ elseif startswith(arg, "--trim")
+ global enable_trim = arg != "--trim=no"
+ push!(julia_args, arg) # forwarded arg
+ elseif arg == "--experimental"
+ push!(julia_args, arg) # forwarded arg
else
if arg[1] == '-' || !isnothing(file)
println("Unexpected argument `$arg`")
@@ -83,8 +88,6 @@ allflags = Base.shell_split(allflags)
rpath = get_rpath(; relative = relative_rpath)
rpath = Base.shell_split(rpath)
tmpdir = mktempdir(cleanup=false)
-initsrc_path = joinpath(tmpdir, "init.c")
-init_path = joinpath(tmpdir, "init.a")
img_path = joinpath(tmpdir, "img.a")
bc_path = joinpath(tmpdir, "img-bc.a")
@@ -100,28 +103,23 @@ function precompile_env()
end
end
-function compile_products()
+function compile_products(enable_trim::Bool)
+
+ # Only strip IR / metadata if not `--trim=no`
+ strip_args = String[]
+ if enable_trim
+ push!(strip_args, "--strip-ir")
+ push!(strip_args, "--strip-metadata")
+ end
+
# Compile the Julia code
- cmd = addenv(`$julia_cmd --project=$(Base.active_project()) --output-o $img_path --output-incremental=no --strip-ir --strip-metadata $julia_args $(joinpath(@__DIR__,"juliac-buildscript.jl")) $absfile $output_type $add_ccallables`, "OPENBLAS_NUM_THREADS" => 1, "JULIA_NUM_THREADS" => 1)
+ cmd = addenv(`$julia_cmd_target --project=$(Base.active_project()) --output-o $img_path --output-incremental=no $strip_args $julia_args $(joinpath(@__DIR__,"juliac-buildscript.jl")) $absfile $output_type $add_ccallables`, "OPENBLAS_NUM_THREADS" => 1, "JULIA_NUM_THREADS" => 1)
verbose && println("Running: $cmd")
if !success(pipeline(cmd; stdout, stderr))
println(stderr, "\nFailed to compile $file")
exit(1)
end
- # Compile the initialization code
- open(initsrc_path, "w") do io
- print(io, """
- #include
- __attribute__((constructor)) void static_init(void) {
- if (jl_is_initialized())
- return;
- julia_init(JL_IMAGE_IN_MEMORY);
- jl_exception_clear();
- }
- """)
- end
- run(`cc $(cflags) -g -c -o $init_path $initsrc_path`)
end
function link_products()
@@ -137,11 +135,11 @@ function link_products()
julia_libs = Base.shell_split(Base.isdebugbuild() ? "-ljulia-debug -ljulia-internal-debug" : "-ljulia -ljulia-internal")
try
if output_type == "--output-lib"
- cmd2 = `cc $(allflags) $(rpath) -o $outname -shared -Wl,$(Base.Linking.WHOLE_ARCHIVE) $img_path -Wl,$(Base.Linking.NO_WHOLE_ARCHIVE) $init_path $(julia_libs)`
+ cmd2 = `cc $(allflags) $(rpath) -o $outname -shared -Wl,$(Base.Linking.WHOLE_ARCHIVE) $img_path -Wl,$(Base.Linking.NO_WHOLE_ARCHIVE) $(julia_libs)`
elseif output_type == "--output-sysimage"
cmd2 = `cc $(allflags) $(rpath) -o $outname -shared -Wl,$(Base.Linking.WHOLE_ARCHIVE) $img_path -Wl,$(Base.Linking.NO_WHOLE_ARCHIVE) $(julia_libs)`
else
- cmd2 = `cc $(allflags) $(rpath) -o $outname -Wl,$(Base.Linking.WHOLE_ARCHIVE) $img_path -Wl,$(Base.Linking.NO_WHOLE_ARCHIVE) $init_path $(julia_libs)`
+ cmd2 = `cc $(allflags) $(rpath) -o $outname -Wl,$(Base.Linking.WHOLE_ARCHIVE) $img_path -Wl,$(Base.Linking.NO_WHOLE_ARCHIVE) $(julia_libs)`
end
verbose && println("Running: $cmd2")
run(cmd2)
@@ -152,5 +150,5 @@ function link_products()
end
precompile_env()
-compile_products()
+compile_products(enable_trim)
link_products()
diff --git a/deps/JuliaSyntax.version b/deps/JuliaSyntax.version
index a7d31b7c16403..9487754d8a617 100644
--- a/deps/JuliaSyntax.version
+++ b/deps/JuliaSyntax.version
@@ -1,4 +1,4 @@
JULIASYNTAX_BRANCH = main
-JULIASYNTAX_SHA1 = 2e965a159dd9f87d216d2d50ecbd2ed4f9af2c5a
+JULIASYNTAX_SHA1 = 46723f071d5b2efcb21ca6757788028afb91cc13
JULIASYNTAX_GIT_URL := https://github.com/JuliaLang/JuliaSyntax.jl.git
JULIASYNTAX_TAR_URL = https://api.github.com/repos/JuliaLang/JuliaSyntax.jl/tarball/$1
diff --git a/deps/checksums/JuliaSyntax-2e965a159dd9f87d216d2d50ecbd2ed4f9af2c5a.tar.gz/md5 b/deps/checksums/JuliaSyntax-2e965a159dd9f87d216d2d50ecbd2ed4f9af2c5a.tar.gz/md5
deleted file mode 100644
index 96f356f3faaec..0000000000000
--- a/deps/checksums/JuliaSyntax-2e965a159dd9f87d216d2d50ecbd2ed4f9af2c5a.tar.gz/md5
+++ /dev/null
@@ -1 +0,0 @@
-40d7bcc6e5741d50a457ace2ca8b2c0c
diff --git a/deps/checksums/JuliaSyntax-2e965a159dd9f87d216d2d50ecbd2ed4f9af2c5a.tar.gz/sha512 b/deps/checksums/JuliaSyntax-2e965a159dd9f87d216d2d50ecbd2ed4f9af2c5a.tar.gz/sha512
deleted file mode 100644
index fd7770cdeaa75..0000000000000
--- a/deps/checksums/JuliaSyntax-2e965a159dd9f87d216d2d50ecbd2ed4f9af2c5a.tar.gz/sha512
+++ /dev/null
@@ -1 +0,0 @@
-b9429b90a28460ef0272cd42a5c221629c6d60221ed088ae3e591cc3d8dbdec32788074397419e58b611bda7df32c7379ec7fafeead7056ed9665591474cec5d
diff --git a/deps/checksums/JuliaSyntax-46723f071d5b2efcb21ca6757788028afb91cc13.tar.gz/md5 b/deps/checksums/JuliaSyntax-46723f071d5b2efcb21ca6757788028afb91cc13.tar.gz/md5
new file mode 100644
index 0000000000000..ff40f520dfe85
--- /dev/null
+++ b/deps/checksums/JuliaSyntax-46723f071d5b2efcb21ca6757788028afb91cc13.tar.gz/md5
@@ -0,0 +1 @@
+2a0921e59edfab54554aa173f091c5b7
diff --git a/deps/checksums/JuliaSyntax-46723f071d5b2efcb21ca6757788028afb91cc13.tar.gz/sha512 b/deps/checksums/JuliaSyntax-46723f071d5b2efcb21ca6757788028afb91cc13.tar.gz/sha512
new file mode 100644
index 0000000000000..64e90d0edaba0
--- /dev/null
+++ b/deps/checksums/JuliaSyntax-46723f071d5b2efcb21ca6757788028afb91cc13.tar.gz/sha512
@@ -0,0 +1 @@
+17050e23216335f6599f009f71e9614a11b6686e455554b1efd287cd8526a7ebece06dc473e34cd50f61bf52085ff72bb4279144a9fdb3a234d3d589a10fddaf
diff --git a/deps/checksums/JuliaSyntaxHighlighting-2680c8bde1aa274f25d7a434c645f16b3a1ee731.tar.gz/md5 b/deps/checksums/JuliaSyntaxHighlighting-2680c8bde1aa274f25d7a434c645f16b3a1ee731.tar.gz/md5
deleted file mode 100644
index 30284ccf352d4..0000000000000
--- a/deps/checksums/JuliaSyntaxHighlighting-2680c8bde1aa274f25d7a434c645f16b3a1ee731.tar.gz/md5
+++ /dev/null
@@ -1 +0,0 @@
-187f155c32a79f57a89e31e672d2d8c5
diff --git a/deps/checksums/JuliaSyntaxHighlighting-2680c8bde1aa274f25d7a434c645f16b3a1ee731.tar.gz/sha512 b/deps/checksums/JuliaSyntaxHighlighting-2680c8bde1aa274f25d7a434c645f16b3a1ee731.tar.gz/sha512
deleted file mode 100644
index bdce410b84d69..0000000000000
--- a/deps/checksums/JuliaSyntaxHighlighting-2680c8bde1aa274f25d7a434c645f16b3a1ee731.tar.gz/sha512
+++ /dev/null
@@ -1 +0,0 @@
-69347af996d77b88b5e5b6e44ff046e9197775a66802a0da6fb5fcbf9e5ca533566955c8435bc25490f6ca0c002b4c1effcddaf932b7eb91e00a8f99554b7b8d
diff --git a/deps/checksums/JuliaSyntaxHighlighting-b7a1c636d3e9690bfbbfe917bb20f6cb112a3e6f.tar.gz/md5 b/deps/checksums/JuliaSyntaxHighlighting-b7a1c636d3e9690bfbbfe917bb20f6cb112a3e6f.tar.gz/md5
new file mode 100644
index 0000000000000..2a4b55e15ab2d
--- /dev/null
+++ b/deps/checksums/JuliaSyntaxHighlighting-b7a1c636d3e9690bfbbfe917bb20f6cb112a3e6f.tar.gz/md5
@@ -0,0 +1 @@
+ed0ccc4434fc70b06e8ea1ddb8141511
diff --git a/deps/checksums/JuliaSyntaxHighlighting-b7a1c636d3e9690bfbbfe917bb20f6cb112a3e6f.tar.gz/sha512 b/deps/checksums/JuliaSyntaxHighlighting-b7a1c636d3e9690bfbbfe917bb20f6cb112a3e6f.tar.gz/sha512
new file mode 100644
index 0000000000000..456f1ee64ca0b
--- /dev/null
+++ b/deps/checksums/JuliaSyntaxHighlighting-b7a1c636d3e9690bfbbfe917bb20f6cb112a3e6f.tar.gz/sha512
@@ -0,0 +1 @@
+04efea853a1c1bfbf5baf4d2908ce492a5ff3029bca73a004280aa116157b6b678a5f9fd6a115f9c57a625d0841d3fb96c8d68ec467e5bc4a743272bee84c8c7
diff --git a/deps/checksums/LinearAlgebra-4e7c3f40316a956119ac419a97c4b8aad7a17e6c.tar.gz/md5 b/deps/checksums/LinearAlgebra-4e7c3f40316a956119ac419a97c4b8aad7a17e6c.tar.gz/md5
new file mode 100644
index 0000000000000..d96f9c1708089
--- /dev/null
+++ b/deps/checksums/LinearAlgebra-4e7c3f40316a956119ac419a97c4b8aad7a17e6c.tar.gz/md5
@@ -0,0 +1 @@
+a622992fb673fc417e588f262b679b00
diff --git a/deps/checksums/LinearAlgebra-4e7c3f40316a956119ac419a97c4b8aad7a17e6c.tar.gz/sha512 b/deps/checksums/LinearAlgebra-4e7c3f40316a956119ac419a97c4b8aad7a17e6c.tar.gz/sha512
new file mode 100644
index 0000000000000..ba63ad3cb9264
--- /dev/null
+++ b/deps/checksums/LinearAlgebra-4e7c3f40316a956119ac419a97c4b8aad7a17e6c.tar.gz/sha512
@@ -0,0 +1 @@
+48ce73841c0a766d0e0310beac77033a694f7fcc30b939241d0c04f35f079dd8f932f1973c002180f178a9f2bdf99535f09bea4f20151e09948f21549bfa1490
diff --git a/deps/checksums/LinearAlgebra-e7da19f2764ba36bd0a9eb8ec67dddce19d87114.tar.gz/md5 b/deps/checksums/LinearAlgebra-e7da19f2764ba36bd0a9eb8ec67dddce19d87114.tar.gz/md5
deleted file mode 100644
index b49f2365717a1..0000000000000
--- a/deps/checksums/LinearAlgebra-e7da19f2764ba36bd0a9eb8ec67dddce19d87114.tar.gz/md5
+++ /dev/null
@@ -1 +0,0 @@
-f9258e97e2f478f66a4e63ed008a6953
diff --git a/deps/checksums/LinearAlgebra-e7da19f2764ba36bd0a9eb8ec67dddce19d87114.tar.gz/sha512 b/deps/checksums/LinearAlgebra-e7da19f2764ba36bd0a9eb8ec67dddce19d87114.tar.gz/sha512
deleted file mode 100644
index 4873f3162c8be..0000000000000
--- a/deps/checksums/LinearAlgebra-e7da19f2764ba36bd0a9eb8ec67dddce19d87114.tar.gz/sha512
+++ /dev/null
@@ -1 +0,0 @@
-3ed43673c69b8ee549bf5b63d9a6d814f9638269fc0b12d7f7c735581757f1627a3dcd1f242f8fde2cbde1509b43d261191a9a0bb019e22759cb019936ae949e
diff --git a/deps/checksums/SparseArrays-212981bf29b03ba460d3251ee9aa4399931b3f2d.tar.gz/md5 b/deps/checksums/SparseArrays-212981bf29b03ba460d3251ee9aa4399931b3f2d.tar.gz/md5
deleted file mode 100644
index 3fddcf07235f8..0000000000000
--- a/deps/checksums/SparseArrays-212981bf29b03ba460d3251ee9aa4399931b3f2d.tar.gz/md5
+++ /dev/null
@@ -1 +0,0 @@
-621e67dc98707b587fb0f6e319dadbb2
diff --git a/deps/checksums/SparseArrays-212981bf29b03ba460d3251ee9aa4399931b3f2d.tar.gz/sha512 b/deps/checksums/SparseArrays-212981bf29b03ba460d3251ee9aa4399931b3f2d.tar.gz/sha512
deleted file mode 100644
index 68885439a1213..0000000000000
--- a/deps/checksums/SparseArrays-212981bf29b03ba460d3251ee9aa4399931b3f2d.tar.gz/sha512
+++ /dev/null
@@ -1 +0,0 @@
-5608adf92eaf7479eacf5ed75b3139438d0d4acf53d55a38c73a553c7fd899f553e1648fa657d35b9a0289e69fc461025dae5f8d15ec891eafcab3a663a8413a
diff --git a/deps/checksums/SparseArrays-72c7cac6bbf21367a3c2fbc5c50e908aea5984bb.tar.gz/md5 b/deps/checksums/SparseArrays-72c7cac6bbf21367a3c2fbc5c50e908aea5984bb.tar.gz/md5
new file mode 100644
index 0000000000000..12c4f2ff97697
--- /dev/null
+++ b/deps/checksums/SparseArrays-72c7cac6bbf21367a3c2fbc5c50e908aea5984bb.tar.gz/md5
@@ -0,0 +1 @@
+3f25f8a47a7945b55c9cc53ef489a55f
diff --git a/deps/checksums/SparseArrays-72c7cac6bbf21367a3c2fbc5c50e908aea5984bb.tar.gz/sha512 b/deps/checksums/SparseArrays-72c7cac6bbf21367a3c2fbc5c50e908aea5984bb.tar.gz/sha512
new file mode 100644
index 0000000000000..5daf7514ff4ed
--- /dev/null
+++ b/deps/checksums/SparseArrays-72c7cac6bbf21367a3c2fbc5c50e908aea5984bb.tar.gz/sha512
@@ -0,0 +1 @@
+5fd827602430e79846d974661b039902a5ab6495f94af8292a3d66c3c3a07a0c59858bfa5bfa941bf8bf6418af98d1dd41b88aad4cc7c7355a8e56cad7f1f3ac
diff --git a/deps/checksums/Statistics-77bd5707f143eb624721a7df28ddef470e70ecef.tar.gz/md5 b/deps/checksums/Statistics-77bd5707f143eb624721a7df28ddef470e70ecef.tar.gz/md5
new file mode 100644
index 0000000000000..600c561d0cf14
--- /dev/null
+++ b/deps/checksums/Statistics-77bd5707f143eb624721a7df28ddef470e70ecef.tar.gz/md5
@@ -0,0 +1 @@
+5235ac479da042d5dc3c572c473b7219
diff --git a/deps/checksums/Statistics-77bd5707f143eb624721a7df28ddef470e70ecef.tar.gz/sha512 b/deps/checksums/Statistics-77bd5707f143eb624721a7df28ddef470e70ecef.tar.gz/sha512
new file mode 100644
index 0000000000000..2f663a3d7c44d
--- /dev/null
+++ b/deps/checksums/Statistics-77bd5707f143eb624721a7df28ddef470e70ecef.tar.gz/sha512
@@ -0,0 +1 @@
+0c02ccf1b4988fc701209afb949f27e6f675f37a628385d3f28dc9ea333fed38ce1ca77b001e58fdbe15af833bbe98598cbf478cef21a98b37d54acfe52270b6
diff --git a/deps/checksums/Statistics-d49c2bf4f81e1efb4980a35fe39c815ef8396297.tar.gz/md5 b/deps/checksums/Statistics-d49c2bf4f81e1efb4980a35fe39c815ef8396297.tar.gz/md5
deleted file mode 100644
index 3956c67f7fd47..0000000000000
--- a/deps/checksums/Statistics-d49c2bf4f81e1efb4980a35fe39c815ef8396297.tar.gz/md5
+++ /dev/null
@@ -1 +0,0 @@
-acf2bb0ea30132602e172e2f5f6274b4
diff --git a/deps/checksums/Statistics-d49c2bf4f81e1efb4980a35fe39c815ef8396297.tar.gz/sha512 b/deps/checksums/Statistics-d49c2bf4f81e1efb4980a35fe39c815ef8396297.tar.gz/sha512
deleted file mode 100644
index 051f2d0a862c3..0000000000000
--- a/deps/checksums/Statistics-d49c2bf4f81e1efb4980a35fe39c815ef8396297.tar.gz/sha512
+++ /dev/null
@@ -1 +0,0 @@
-5e879fe79bae19b62f81659a102602271c73a424faf4be069ab31fb50e30b536a8c7b3692127763000cc1dbab69c93ac3da7bace5f093d05dce2d652fb221d52
diff --git a/deps/checksums/StyledStrings-3fe829fcf611b5fefaefb64df7e61f2ae82db117.tar.gz/md5 b/deps/checksums/StyledStrings-3fe829fcf611b5fefaefb64df7e61f2ae82db117.tar.gz/md5
new file mode 100644
index 0000000000000..46d5cacf788df
--- /dev/null
+++ b/deps/checksums/StyledStrings-3fe829fcf611b5fefaefb64df7e61f2ae82db117.tar.gz/md5
@@ -0,0 +1 @@
+1cb6007a66d3f74cbe5b27ee449aa9c8
diff --git a/deps/checksums/StyledStrings-3fe829fcf611b5fefaefb64df7e61f2ae82db117.tar.gz/sha512 b/deps/checksums/StyledStrings-3fe829fcf611b5fefaefb64df7e61f2ae82db117.tar.gz/sha512
new file mode 100644
index 0000000000000..724b2d311c123
--- /dev/null
+++ b/deps/checksums/StyledStrings-3fe829fcf611b5fefaefb64df7e61f2ae82db117.tar.gz/sha512
@@ -0,0 +1 @@
+1fa95646fdf4cc7ea282bd355fded9464e7572792912942ea1c45f6ed126eead2333fdeed92e7db3efbcd6c3a171a04e5c9562dab2685bb39947136284ae1da3
diff --git a/deps/checksums/StyledStrings-8985a37ac054c37d084a03ad2837208244824877.tar.gz/md5 b/deps/checksums/StyledStrings-8985a37ac054c37d084a03ad2837208244824877.tar.gz/md5
deleted file mode 100644
index 0fd8e8966e068..0000000000000
--- a/deps/checksums/StyledStrings-8985a37ac054c37d084a03ad2837208244824877.tar.gz/md5
+++ /dev/null
@@ -1 +0,0 @@
-411277f3701cc3e286ec8a84ccdf6f11
diff --git a/deps/checksums/StyledStrings-8985a37ac054c37d084a03ad2837208244824877.tar.gz/sha512 b/deps/checksums/StyledStrings-8985a37ac054c37d084a03ad2837208244824877.tar.gz/sha512
deleted file mode 100644
index 0b495aefef55d..0000000000000
--- a/deps/checksums/StyledStrings-8985a37ac054c37d084a03ad2837208244824877.tar.gz/sha512
+++ /dev/null
@@ -1 +0,0 @@
-95a7e92389f6fd02d3bec17ec0201ba41316aa2d7c321b14af88ccce8246fd0000ed2c0cc818f87cb81f7134304233db897f656426a00caac1bc7635056260c2
diff --git a/deps/checksums/cacert-2024-12-31.pem/md5 b/deps/checksums/cacert-2024-12-31.pem/md5
deleted file mode 100644
index b01bf68ddc247..0000000000000
--- a/deps/checksums/cacert-2024-12-31.pem/md5
+++ /dev/null
@@ -1 +0,0 @@
-d9178b626f8b87f51b47987418d012bf
diff --git a/deps/checksums/cacert-2024-12-31.pem/sha512 b/deps/checksums/cacert-2024-12-31.pem/sha512
deleted file mode 100644
index c12b8215a7855..0000000000000
--- a/deps/checksums/cacert-2024-12-31.pem/sha512
+++ /dev/null
@@ -1 +0,0 @@
-bf578937d7826106bae1ebe74a70bfbc439387445a1f41ef57430de9d9aea6fcfa1884381bf0ef14632f6b89e9543642c9b774fcca93837efffdc557c4958dbd
diff --git a/deps/checksums/cacert-2025-02-25.pem/md5 b/deps/checksums/cacert-2025-02-25.pem/md5
new file mode 100644
index 0000000000000..3dced8d2bee6b
--- /dev/null
+++ b/deps/checksums/cacert-2025-02-25.pem/md5
@@ -0,0 +1 @@
+1a7de82bb9f0fcc779ca18a7a9310898
diff --git a/deps/checksums/cacert-2025-02-25.pem/sha512 b/deps/checksums/cacert-2025-02-25.pem/sha512
new file mode 100644
index 0000000000000..bb59a65af401e
--- /dev/null
+++ b/deps/checksums/cacert-2025-02-25.pem/sha512
@@ -0,0 +1 @@
+e5fe41820460e6b65e8cd463d1a5f01b7103e1ef66cb75fedc15ebcba3ba6600d77e5e7c2ab94cbb1f11c63b688026a04422bbe2d7a861f7a988f67522ffae3c
diff --git a/deps/checksums/mmtk_julia b/deps/checksums/mmtk_julia
index 098937aea1991..4ccc7b407cb60 100644
--- a/deps/checksums/mmtk_julia
+++ b/deps/checksums/mmtk_julia
@@ -8,3 +8,7 @@ mmtk_julia-c9e046baf3a0d52fe75d6c8b28f6afd69b045d95.tar.gz/md5/73a8fbea71edce30a
mmtk_julia-c9e046baf3a0d52fe75d6c8b28f6afd69b045d95.tar.gz/sha512/374848b7696b565dea66daa208830581f92c1fcb0138e7a7ab88564402e94bc79c54b6ed370ec68473e31e2bd411bf82c97793796c31d39aafbbfffea9c05588
mmtk_julia.v0.30.4+0.x86_64-linux-gnu.tar.gz/md5/8cdeb14fd69945f64308be49f6912f9c
mmtk_julia.v0.30.4+0.x86_64-linux-gnu.tar.gz/sha512/3692502f65dec8c0971b56b9bf8178641892b390d520cbcd69880d75b7500e6341534d87882246e68998f590f824ec54c18f4b8fb4aa09b8f313de065c48450e
+mmtk_julia-10ad6638b69b31a97a844f2f4e651e5ccea4e298.tar.gz/md5/59ed2c0e0b48673988a40527907f13ae
+mmtk_julia-10ad6638b69b31a97a844f2f4e651e5ccea4e298.tar.gz/sha512/d0988c37e82b8d481753f4ce83f38ba11276af3dafa8f65ee2c51122fce0dab056a65b3029cb255732226cc28d1a02e607bdaac91a02c0fd6a9fcfae834fee8c
+mmtk_julia.v0.30.5+1.x86_64-linux-gnu.tar.gz/md5/4d12d64754bb5c61e86e97e88bcf7912
+mmtk_julia.v0.30.5+1.x86_64-linux-gnu.tar.gz/sha512/0d619f00fd644338ca1ca2582b20e41db702dff8e0c338c093b2759b54379ba26ae7e0181c64931a45ebd5c3995540e535c248df9b986e73b18b65a39c5d78d2
diff --git a/deps/checksums/mpfr b/deps/checksums/mpfr
index 7b3b57978bd01..7f0de6099713c 100644
--- a/deps/checksums/mpfr
+++ b/deps/checksums/mpfr
@@ -1,38 +1,38 @@
-MPFR.v4.2.1+2.aarch64-apple-darwin.tar.gz/md5/1f5bba3e8e540720e239da75e5ae79eb
-MPFR.v4.2.1+2.aarch64-apple-darwin.tar.gz/sha512/7de26c625e540a5b88e280ec2cb8712d4514732d80a0c6342d2b2cabc6bc17c05f6c614b8e38800c93a4af5438c554733d3fa2002ef70072dfb44c08d3f03d26
-MPFR.v4.2.1+2.aarch64-linux-gnu.tar.gz/md5/112ddd4e5cddf36b005394f9cd81b8e5
-MPFR.v4.2.1+2.aarch64-linux-gnu.tar.gz/sha512/dc125f625e8c74ce18c052ef759ccbcfc2f3a932f2810a306bdddf70d5f37f3546200690fd08fb76742022322a7c1b9aa907b4aec6edb318060f0648ff426cbc
-MPFR.v4.2.1+2.aarch64-linux-musl.tar.gz/md5/a0919ef7cc35bb663d05e27da2bcb9a7
-MPFR.v4.2.1+2.aarch64-linux-musl.tar.gz/sha512/8acbaaca766c2ce225ac8df88c103a57fc52119d1fd54e9fc7d1f9d725c4ca9f74a0090e86eea0c140482a1abaf5b6086c453824a7516e9aef3ede5058f1767c
-MPFR.v4.2.1+2.aarch64-unknown-freebsd.tar.gz/md5/61e1dcc7e323b976854a4e8164316d37
-MPFR.v4.2.1+2.aarch64-unknown-freebsd.tar.gz/sha512/f3a5493f88b290d15aff9bf79b15158d19bea05af7210b2967368e0b2f98cd291f77e62f39ee0c7ad4e9d2ef6ebdba4bf2fea24c723791f71f7b9b1ef989a67d
-MPFR.v4.2.1+2.armv6l-linux-gnueabihf.tar.gz/md5/629aad4ac45ba23becd8a26df188638c
-MPFR.v4.2.1+2.armv6l-linux-gnueabihf.tar.gz/sha512/bb05a8bf127eb16608a82037546f48462cb6168e1adcdb2c60dc3bd08f62cff30cf603abcab87bb336305d37dbb7b0480ea8f6664191879bdcd487738a33dd99
-MPFR.v4.2.1+2.armv6l-linux-musleabihf.tar.gz/md5/0c3c026051b096d98c8d476dd44db334
-MPFR.v4.2.1+2.armv6l-linux-musleabihf.tar.gz/sha512/9e791fe9748c87068c167517883cc905fe51ea38d2db89562a7a0959cfd83b268eed2897e5eaaf90c0b0b08a4efd8039bdeece64e83b17bf1d676570d13c2b98
-MPFR.v4.2.1+2.armv7l-linux-gnueabihf.tar.gz/md5/a2433a717e49ad95c3e430a538d01134
-MPFR.v4.2.1+2.armv7l-linux-gnueabihf.tar.gz/sha512/abde21a943d4af312e0d44b1ff1d4aefa10b2f38c74ff0e04c0c2b8561750ef5d164679564ffe1b551821d83ebcafbe99467230b37fe4591c593a24dfb070c6a
-MPFR.v4.2.1+2.armv7l-linux-musleabihf.tar.gz/md5/4c892b4cbf1926d5d2b6a88330015c8f
-MPFR.v4.2.1+2.armv7l-linux-musleabihf.tar.gz/sha512/24825bb1268ef2ea42894ec9ff6589308abae430dd8e43a2ca0d368f1e718fd3cdf6d9bc4bc383346970ba845d2ef1721c4848ee0c783d09addc5505131db3e6
-MPFR.v4.2.1+2.i686-linux-gnu.tar.gz/md5/0b1e0268dcaeb3aa0f7f0a6451c6b841
-MPFR.v4.2.1+2.i686-linux-gnu.tar.gz/sha512/f0ef142c7b86e8f92b78a7ff0607da70bf8f3970b118fa77438cbb0acbea604dc0c7566b52ff1f85b179aac7661b31e4aee049f2c5ff799c95b385ba9cde2a25
-MPFR.v4.2.1+2.i686-linux-musl.tar.gz/md5/2fc9a938e76e7bdc0b73d7e8bfc8b8ee
-MPFR.v4.2.1+2.i686-linux-musl.tar.gz/sha512/4aed3884ad569b7695b9383db9d9dbb279ffe5349f7757b867ff860fa600b47faa4c169f4a60409666ce45fc6e6f269c18cef2df6fa0585f056d7e07e55005b8
-MPFR.v4.2.1+2.i686-w64-mingw32.tar.gz/md5/d13c44bb28d721107639c8555db5e157
-MPFR.v4.2.1+2.i686-w64-mingw32.tar.gz/sha512/1b5562d2df322c28bd06bb4ba8c9039cf90ed62affcf7f2b0d7ae8925d503c76a0d3d2f9b65c8c55575f245a4df8fbc4c7c63e93e7b973188f203a7fbda4eac5
-MPFR.v4.2.1+2.powerpc64le-linux-gnu.tar.gz/md5/52b3912b2c5f59ab3dcd7c3e06ca41b5
-MPFR.v4.2.1+2.powerpc64le-linux-gnu.tar.gz/sha512/533cf1f93c4464b4bed1d56ea79946fc2d20f3a7825d6b0383ed98cec99f85713e7bca549fd8948adb69aedc14e5d14a54238b3e67ef103e1b049b0cfb6cc1c9
-MPFR.v4.2.1+2.riscv64-linux-gnu.tar.gz/md5/aef7709c8457ee2db2622c39f1da16b7
-MPFR.v4.2.1+2.riscv64-linux-gnu.tar.gz/sha512/7a9c88563e3e7ab22a3aaa45690ed89c3e7eb22333a3d45c5e04ad2660c91ad2c97f10cd6c1aa1ccfdbf97186f9fd7f92330a41ec0be026e2ff84c5ba91f2652
-MPFR.v4.2.1+2.x86_64-apple-darwin.tar.gz/md5/12afc9778e39a5b6d9ea0161e2c80a95
-MPFR.v4.2.1+2.x86_64-apple-darwin.tar.gz/sha512/a9070423a898fa865740753ae7513d3cc0b500bd9b6b5c6aa672833dcac429efd806eff48501b51afcba5db0d31e79dac243b11b2f8847a1551576c6131506f5
-MPFR.v4.2.1+2.x86_64-linux-gnu.tar.gz/md5/46c6a5f40243795bdff51bd68a89c82e
-MPFR.v4.2.1+2.x86_64-linux-gnu.tar.gz/sha512/df8209d69ae55dd54491055078f113f4ac8be7bc68e1c0eb62944e6c9c04ed3e9a55c4a5f28ec68eb69f558d9f4d1b975f36de572fbd0ef7720568efc8042327
-MPFR.v4.2.1+2.x86_64-linux-musl.tar.gz/md5/045236ee0d558d2eda42df76c3397f69
-MPFR.v4.2.1+2.x86_64-linux-musl.tar.gz/sha512/52b68a673160af7cd09b191f3c28e17d5af7516b5baa86c0df9cb63a116772a15b5358f3db5f0b254b5752c652f8959454667cc1726ea4ff30946e3bbdb90ab4
-MPFR.v4.2.1+2.x86_64-unknown-freebsd.tar.gz/md5/da3da71bc7572eca5bc3d3895abf73c2
-MPFR.v4.2.1+2.x86_64-unknown-freebsd.tar.gz/sha512/4270b83ebe72d431f8fd9127b2b8d3bd75c2e52c563d390a4ca8d40c0514f5996fce57746d07b7d3bcbf93bfe78d420f815fde5eda4d84a5bcb7b7cf0e092504
-MPFR.v4.2.1+2.x86_64-w64-mingw32.tar.gz/md5/2a6f5ccb8d45591a845ad43916beb85a
-MPFR.v4.2.1+2.x86_64-w64-mingw32.tar.gz/sha512/db9ecc9d8247fe4421c4cc9c6ab540e17a7445056b7a1062d4e334b353783a1c067062fd8e6f0517d8bd8782c9bb75abcce8ab8247be707ba066dc90b7fc12ff
-mpfr-4.2.1.tar.bz2/md5/7765afa036e4ce7fb0e02bce0fef894b
-mpfr-4.2.1.tar.bz2/sha512/c81842532ecc663348deb7400d911ad71933d3b525a2f9e5adcd04265c9c0fdd1f22eca229f482703ac7f222ef209fc9e339dd1fa47d72ae57f7f70b2336a76f
+MPFR.v4.2.2+0.aarch64-apple-darwin.tar.gz/md5/01a13215fd646c761e469f36f693fdc8
+MPFR.v4.2.2+0.aarch64-apple-darwin.tar.gz/sha512/da473776ac8c687ab34792235ee5e1e08dc6a2e29b73620bd6dac93db32397037ae502b8ac3a35e020f722dae7da007a060e5e11e3287c4cdb846bf7e5168297
+MPFR.v4.2.2+0.aarch64-linux-gnu.tar.gz/md5/58ca9f3e08a388c3e40692e623f3884e
+MPFR.v4.2.2+0.aarch64-linux-gnu.tar.gz/sha512/c6846d982ce1211791b466ed6fed2aad9e5f9a4866c48db99eb288dcbb1480660772010869fdea66d6453c8c140c92e367cfe55f6087fe41ea040fbd77eafe34
+MPFR.v4.2.2+0.aarch64-linux-musl.tar.gz/md5/2ff7e1400f27d049e3274a6277322860
+MPFR.v4.2.2+0.aarch64-linux-musl.tar.gz/sha512/388f7050288be9d30c4a2e772c0859e414b0cf6dbc845eec0eb6aeda53595df94a4e3001d02fa04c173fcf74e00c2552a8880b62ebf5adf443da2a95497be891
+MPFR.v4.2.2+0.aarch64-unknown-freebsd.tar.gz/md5/d1e6c477ab9678d1cd1dfa7e00366e69
+MPFR.v4.2.2+0.aarch64-unknown-freebsd.tar.gz/sha512/897174756651d01272d86bb147f5dda9f84f8f1bf1fe02b8505e141df3cc38523019f85cbe538fcc6ea8073d7743fc6428a06271107b059de80cd8f959c52daa
+MPFR.v4.2.2+0.armv6l-linux-gnueabihf.tar.gz/md5/5213b0ef1b191c529e3335e05b918003
+MPFR.v4.2.2+0.armv6l-linux-gnueabihf.tar.gz/sha512/bbcdb90f80d8cb826cd055eb41f051890c7847fc0887389b61bd24c051d35873af36672e5f1956cc3fb23b8e3ee50ee069c185fc2faabe302787d70210bd5b07
+MPFR.v4.2.2+0.armv6l-linux-musleabihf.tar.gz/md5/9a9d9207a6b52b6e84b1b2b1c631e0f2
+MPFR.v4.2.2+0.armv6l-linux-musleabihf.tar.gz/sha512/fd40d16a40b1db2b441339e5c8cb3f8a1810d2889713b0504f9bfd5451f4f4c2dd0ca35a4b2922feca9cf50e4a9b3bf8cf2c088655dd85a23c33ee67c12e0a72
+MPFR.v4.2.2+0.armv7l-linux-gnueabihf.tar.gz/md5/44532dd5607ced01a8ba0856c3bfdbc3
+MPFR.v4.2.2+0.armv7l-linux-gnueabihf.tar.gz/sha512/469fc030f458bd52f6bdffc442ceaaf8659f0f1e40d581eb1303fd4753d2c665fcb75bc6c54d04eb53d77b1945d67f48a5ea5614f2ee82cc7fd27e89859b45f4
+MPFR.v4.2.2+0.armv7l-linux-musleabihf.tar.gz/md5/fbd13b054b8d27be6bc836283f7846bf
+MPFR.v4.2.2+0.armv7l-linux-musleabihf.tar.gz/sha512/926dc03f99a6827c833614d17c5ef4f80fb862bdf4397db9aaf8ae9b3a66e8b9121cfa044b18db46f5774abbd7e9c129363183ccb2ae3192084711e7ff9d6382
+MPFR.v4.2.2+0.i686-linux-gnu.tar.gz/md5/da6fbb90dc20830af9325cfaf3544e4c
+MPFR.v4.2.2+0.i686-linux-gnu.tar.gz/sha512/d235884e1d1bef406b1e5ceb9c34aab68c1a8040b2022964105238ef8cdfd4af7aebe474fef80849689ca88d9168697fd55e8d6ab92b6641a1f37c431d5e3ff3
+MPFR.v4.2.2+0.i686-linux-musl.tar.gz/md5/fc885092e1469a06aaaaf24168e8fafe
+MPFR.v4.2.2+0.i686-linux-musl.tar.gz/sha512/5307926e1222b302e48e2f5c08479b920279d15b95937a245e16ac1dfd5c6206cb64fe4b6ca4cb7d6be847d8cc01a04d2661a630b978dc2dbd60605d222b8b21
+MPFR.v4.2.2+0.i686-w64-mingw32.tar.gz/md5/55f129d5b5b849b3bc018e68ccf14914
+MPFR.v4.2.2+0.i686-w64-mingw32.tar.gz/sha512/9a24e4616e05f5c1fb53e7a12167f7a55d05ec1895124d6ee23b2efd548f49e4c7995c16d240ec803f352d586ae4667027ee0bdeefa520e0c1f581fcc338dc44
+MPFR.v4.2.2+0.powerpc64le-linux-gnu.tar.gz/md5/6f47e4cde45ddf0cb2ea4f31ef9c9e04
+MPFR.v4.2.2+0.powerpc64le-linux-gnu.tar.gz/sha512/4fd8fbe166e719c636e430d4d5c938231fa9126b29eacbc678d2eb50d3d4b95cf6ccef155ce401c6d33b9730c2f89c0c77ec8fb39254483c2e4004639c503c1c
+MPFR.v4.2.2+0.riscv64-linux-gnu.tar.gz/md5/c4736705ff2a55cf8206c3af84bfc417
+MPFR.v4.2.2+0.riscv64-linux-gnu.tar.gz/sha512/e1e77d64ee88de2990fbc791d7307afe859cfbdc1ac67e7bdfa633627b5542ce2e3ee0cd9fe4036abfaf60509277a43f263e2155665ac2c5e38b8627e470f399
+MPFR.v4.2.2+0.x86_64-apple-darwin.tar.gz/md5/c3e983178a1e9600f42714d4cc1ecdf6
+MPFR.v4.2.2+0.x86_64-apple-darwin.tar.gz/sha512/c5c6cebcdfc5b7b84e9e217a81d99e5af78d163949745d570af5689210b3eedeb9de3c11991b1b36d8fdbee17b550a4072af951d19c3f863cf24cda7d9c12950
+MPFR.v4.2.2+0.x86_64-linux-gnu.tar.gz/md5/61fc7c7aa676d0a07e1709b433a8e423
+MPFR.v4.2.2+0.x86_64-linux-gnu.tar.gz/sha512/74bdefa72c51c82ca709e3494cd664a6593173bbfbe0198f18f4c0add06ce4c1217e4dd49e99cb151d71c85cd696ae2147aed29ed2cf3f1ca0e5b40582abb571
+MPFR.v4.2.2+0.x86_64-linux-musl.tar.gz/md5/207ee8ad2293ba36d3d7bb845ab346e0
+MPFR.v4.2.2+0.x86_64-linux-musl.tar.gz/sha512/63325e6595861a324f3c299d8c51b1d665197217c8fc9a5ae627b624037394f050bb08a9acd14e9809f982942c066f1185dded0fa493f360bcd3baae17a05f92
+MPFR.v4.2.2+0.x86_64-unknown-freebsd.tar.gz/md5/74e5a5ce0ea84959ccec7b7f7ab22c66
+MPFR.v4.2.2+0.x86_64-unknown-freebsd.tar.gz/sha512/411dbb339218669af6181fdf1e17f926abb9830ae54a8f9ef1b7df53021e8da01a41fda13067731afaf9b803324d5f82c060ef5b5b91045625188458b99dcc75
+MPFR.v4.2.2+0.x86_64-w64-mingw32.tar.gz/md5/2de84b494ea832147be4f9bfa786cd19
+MPFR.v4.2.2+0.x86_64-w64-mingw32.tar.gz/sha512/5f86aef6ab4fd7517cb23ad9a32ae21954a3ce1f27f5cbd28abe038271e20197b7c241055092a4aa6d5391f012bdee10465c58b53acd64bb5b99fd754c75ad29
+mpfr-4.2.2.tar.bz2/md5/afe8268360bc8702fbc8297d351c8b5e
+mpfr-4.2.2.tar.bz2/sha512/0176e50808dcc07afbf5bc3e38bf9b7b21918e5f194aa0bfd860d99b00c470630aef149776c4be814a61c44269c3a5b9a4b0b1c0fcd4c9feb1459d8466452da8
diff --git a/deps/checksums/openssl b/deps/checksums/openssl
index c973f592861f3..3b41bfa69231d 100644
--- a/deps/checksums/openssl
+++ b/deps/checksums/openssl
@@ -1,38 +1,38 @@
-OpenSSL.v3.0.15+2.aarch64-apple-darwin.tar.gz/md5/d11d92e6530705e3d93925bbb4dfccff
-OpenSSL.v3.0.15+2.aarch64-apple-darwin.tar.gz/sha512/e30d763d956f930c3dab961ef1b382385b78cbb2324ae7f5e943420b9178bc2b086d9877c2d2b41b30a92ca109d7832a2ae50f70547fcc9788e25889d8252ffc
-OpenSSL.v3.0.15+2.aarch64-linux-gnu.tar.gz/md5/d29f0d3a35d592488ba3a8bbb0dc8d0e
-OpenSSL.v3.0.15+2.aarch64-linux-gnu.tar.gz/sha512/67c527c1930b903d2fbb55df1bd3fc1b8394bc4fadd15dd8fb84e776bae8c448487c117492e22b9b014f823cc7fe709695f4064639066b10427b06355540e997
-OpenSSL.v3.0.15+2.aarch64-linux-musl.tar.gz/md5/4f5313f1f18e29585951e95372a7a0fe
-OpenSSL.v3.0.15+2.aarch64-linux-musl.tar.gz/sha512/48007a1f6667d6aeb87cc7287723ed00e39fe2bc9c353ff33348442516f1a28961985cc4a29a2a8f76b3a7049bd955973562d7c6c4af43af884596def636f7f8
-OpenSSL.v3.0.15+2.aarch64-unknown-freebsd.tar.gz/md5/5b6041353197bb8f75b39ed8f58cf4e9
-OpenSSL.v3.0.15+2.aarch64-unknown-freebsd.tar.gz/sha512/9be617d51fdc167085887380e720e6baf8e1e180f455b297f44d0bc0862fd490f015b5292d952d4ad095750cde796cc7dac4f901389b73135cb399b3a9d378c1
-OpenSSL.v3.0.15+2.armv6l-linux-gnueabihf.tar.gz/md5/858f548a28e289153842226473138a3e
-OpenSSL.v3.0.15+2.armv6l-linux-gnueabihf.tar.gz/sha512/f9385678fca65d1fb8d96756442518b16607a57a9b6d76991414b37dfc4e30a7e1eebe5f3977b088b491216af4a34f958b64fe95062ee9ae23a9212f46c4e923
-OpenSSL.v3.0.15+2.armv6l-linux-musleabihf.tar.gz/md5/c4e52ecb4f9e24d948724424f1070071
-OpenSSL.v3.0.15+2.armv6l-linux-musleabihf.tar.gz/sha512/12f9276c68049026f2741c7d97e62d24525e5e832911546e1ea3868362034e6384304d749730122edf828b8c5573084055d59cc0bd75bda32f000ce630837c2b
-OpenSSL.v3.0.15+2.armv7l-linux-gnueabihf.tar.gz/md5/767d3f3047366ccd6e2aa275f80d9f6c
-OpenSSL.v3.0.15+2.armv7l-linux-gnueabihf.tar.gz/sha512/17700fd33c221070a7dd2db79d045e102591b85e16b3d4099356fb6a8635aea297b5fcef91740f75c55344a12ed356772b3b85c0cc68627856093ceb53ea8eb3
-OpenSSL.v3.0.15+2.armv7l-linux-musleabihf.tar.gz/md5/3ef2385cb1fec9e2d3af2ba9385ac733
-OpenSSL.v3.0.15+2.armv7l-linux-musleabihf.tar.gz/sha512/6156e9431fa8269b8d037149271be6cca0b119be67be01cfd958dabf59cdd468ef2a5ebf885e5835585006efdedd29afc308076283d070d4ae743146b57cd2b1
-OpenSSL.v3.0.15+2.i686-linux-gnu.tar.gz/md5/e62992d214cec6b1970f9fbd04cb8ecd
-OpenSSL.v3.0.15+2.i686-linux-gnu.tar.gz/sha512/dfdb3d2d1d5fed7bf1c322899d6138c81f0653350f4b918858dd51bf7bcc86d2d04de824533925fa5f8d366a5c18ee33ade883f50a538b657717f8a428be8c60
-OpenSSL.v3.0.15+2.i686-linux-musl.tar.gz/md5/186a6bb8055ce089ac0c9897bd2cd697
-OpenSSL.v3.0.15+2.i686-linux-musl.tar.gz/sha512/f3c8d608113e9b0e91dd6af697172a46892d4a66572e35e13ad394397291dded3042667c1ec4fafe051778e71ff56a876dc3e848a2b85cef9f925ef3969ab950
-OpenSSL.v3.0.15+2.i686-w64-mingw32.tar.gz/md5/b72b8e4883337e4bc90094dce86c8b8b
-OpenSSL.v3.0.15+2.i686-w64-mingw32.tar.gz/sha512/3b5ddef15ca1463ab92ef5b88df36f8418c8c44ffb123a0922e55718ab317b5fe379994aba9a5e8ca112475043d5cf99b1574702cdb30de438f458ee06ac80ea
-OpenSSL.v3.0.15+2.powerpc64le-linux-gnu.tar.gz/md5/da194ce6f37f34cc19cc78d25c9af5e2
-OpenSSL.v3.0.15+2.powerpc64le-linux-gnu.tar.gz/sha512/e256a9d9a0af8764de730419281aa4d3ee9f6146692ec9105a318d8301d8fda5cca82c6ef4d0d7b70d721992361771724b237ce26ef81f92c295f6056d5a7cdd
-OpenSSL.v3.0.15+2.riscv64-linux-gnu.tar.gz/md5/86825ee5f83ec0c827d5c051fe1a3d41
-OpenSSL.v3.0.15+2.riscv64-linux-gnu.tar.gz/sha512/7db4ae2f0a9491ae484da5b8b0c3698d970ada91c83f9783c9e5bd92006f52dffa1a4c7fb282b63e34760199a97c52793040dc306ad0986970cfa233e29cb195
-OpenSSL.v3.0.15+2.x86_64-apple-darwin.tar.gz/md5/271cc359f5bc4718659044ad5ac7631d
-OpenSSL.v3.0.15+2.x86_64-apple-darwin.tar.gz/sha512/10e7575dc4cce6c617c96e6f94dbfe3058aad696292d3fac4bde7c92623f2a849b7d10e35b156b7582294b3cf103d61b3ea73605f958ee4c9f8ff05b647939a7
-OpenSSL.v3.0.15+2.x86_64-linux-gnu.tar.gz/md5/5d045d93d632af9914bff551f67eed9b
-OpenSSL.v3.0.15+2.x86_64-linux-gnu.tar.gz/sha512/240791382d9549be029e2d404bc0e962f9876ab0597bf20cf34c87fcfafc3d75ba9f223641287895f9aee8519a5a33293910ed6d67bc1424ff3513eedaa8b699
-OpenSSL.v3.0.15+2.x86_64-linux-musl.tar.gz/md5/bb2637babf3730ed1117f89cb8aab34a
-OpenSSL.v3.0.15+2.x86_64-linux-musl.tar.gz/sha512/b847539acc00870f77b242eeccfcf16f590493b7deb0089fa3654026f4016d40f9595d3bbb21ab981e9decfde4321da71f162beb1837a158fd3a884375a86fee
-OpenSSL.v3.0.15+2.x86_64-unknown-freebsd.tar.gz/md5/23b69e0256e6c86e026be3ade20aed5c
-OpenSSL.v3.0.15+2.x86_64-unknown-freebsd.tar.gz/sha512/1b7da1e13d325c7776b8e1a63aaa334bd633bb10604f8bed5f5f6a81955268b3d11ad221a5dd181dbdc7ad27c35d5754e6875d36226003c2fd7da6cd91854de1
-OpenSSL.v3.0.15+2.x86_64-w64-mingw32.tar.gz/md5/73cf4138ab403b7c9f91368a030590f9
-OpenSSL.v3.0.15+2.x86_64-w64-mingw32.tar.gz/sha512/052bb52837c29b4b18a97df71a80ad77486bd6ccef6e2e57dfa68a02754180976dc0302a158886393ef13fe91904f963119b17429a4ecc6f8b6c80ff878df05d
-openssl-3.0.15.tar.gz/md5/08f458c00fff496a52ef931c481045cd
-openssl-3.0.15.tar.gz/sha512/acd80f2f7924d90c1416946a5c61eff461926ad60f4821bb6b08845ea18f8452fd5e88a2c2c5bd0d7590a792cb8341a3f3be042fd0a5b6c9c1b84a497c347bbf
+OpenSSL.v3.5.0+0.aarch64-apple-darwin.tar.gz/md5/b8dc9909528f769bd9ac56cf2681f387
+OpenSSL.v3.5.0+0.aarch64-apple-darwin.tar.gz/sha512/0d9ea24d8f856c31c8b88afa1de317d13aff1f1f60b309e06e77eea91d195526ec91ed2d077f0dbb75370c17b8875c24d3066e6872bbef04312616e99d0aff3d
+OpenSSL.v3.5.0+0.aarch64-linux-gnu.tar.gz/md5/7cf5baeacf4d882b547c229758a9fa9b
+OpenSSL.v3.5.0+0.aarch64-linux-gnu.tar.gz/sha512/726ceee82379e667a65abe27c482d3b57e611c630d82b1314f6d385f0f2e8256835ef707c2e015f9204d563d7ee469bed2dee88d245af81dcde2af3b8331b19c
+OpenSSL.v3.5.0+0.aarch64-linux-musl.tar.gz/md5/4601e56eaed365548203752a19f4f8e8
+OpenSSL.v3.5.0+0.aarch64-linux-musl.tar.gz/sha512/da349081850d47b9393665c4365787c26f61471362475c2acd3c8205063d09a785f7b6c836ba6793e880440115b19e85821b4d1938e57dafea0cabb45048a70b
+OpenSSL.v3.5.0+0.aarch64-unknown-freebsd.tar.gz/md5/6a9e78436727e67af2f537170e18445e
+OpenSSL.v3.5.0+0.aarch64-unknown-freebsd.tar.gz/sha512/4dc2f7a39f17255871773d10ed1b74de5c908af0f7a4bd3f94fd71bc12480fd4cdee0bd859a154328218935f004eee20359dacc353e366c47ed890229a579fc4
+OpenSSL.v3.5.0+0.armv6l-linux-gnueabihf.tar.gz/md5/5c751092c27910a48cab31f87700fe19
+OpenSSL.v3.5.0+0.armv6l-linux-gnueabihf.tar.gz/sha512/b44e2356f719549dd831745963b8c74346be173d176ca15ab2ee6f4a1ec7e105086d89115cb76831a3251eb67bf7c5ff5cba3a03fd4614a3501af235a8e03beb
+OpenSSL.v3.5.0+0.armv6l-linux-musleabihf.tar.gz/md5/fc05f9645ff000b21e46951f16833fb0
+OpenSSL.v3.5.0+0.armv6l-linux-musleabihf.tar.gz/sha512/8c960294fe542ab9d9ae7dc283c0c30621f348ff8011a9a47f38c1460234b3b128011426c3e5d0cb6c9b02fbee261b7b264d0b0c55bdf3be2a2cd5bdd210d71d
+OpenSSL.v3.5.0+0.armv7l-linux-gnueabihf.tar.gz/md5/8928d47a0f549d15240eb934caddf599
+OpenSSL.v3.5.0+0.armv7l-linux-gnueabihf.tar.gz/sha512/4b5dbfb3a4ea4ebe6510cbe63da2de0bb3762a0fc98946acbb059e9a92791ac65a3519577250dcb571fa07f29be182f165a5d4fa05fc96b60270441adab30e74
+OpenSSL.v3.5.0+0.armv7l-linux-musleabihf.tar.gz/md5/1e4c11043d05bea0fcdbf92525152c51
+OpenSSL.v3.5.0+0.armv7l-linux-musleabihf.tar.gz/sha512/e9514cd0c3a8c3659ff87d505490ca3011a65800222b21e4f932bc2a80fb38bb11de1d13925c3a6313f6bea1c2baf35b38b3db18193ac11ec42eb434edee3418
+OpenSSL.v3.5.0+0.i686-linux-gnu.tar.gz/md5/ee699f302edd1f7677baa565ae631c74
+OpenSSL.v3.5.0+0.i686-linux-gnu.tar.gz/sha512/16dd396b192b4ca23d1fad54d130a92ef43a36259482cd3b276001d084711ef8674dcd167c9f832f5989d6197889af69d2ae6bcef3e6b9f538066bf347c89584
+OpenSSL.v3.5.0+0.i686-linux-musl.tar.gz/md5/e6ffea118acb68d39ccb13df51e15125
+OpenSSL.v3.5.0+0.i686-linux-musl.tar.gz/sha512/44335dcaf144d388bd47dd80db08862f4cff1d5a90861f34001127df74d0d16babedbe0ffd02ab398bddd17ecda605f433a940b3cc5159947cb54810a564b0df
+OpenSSL.v3.5.0+0.i686-w64-mingw32.tar.gz/md5/8ef4284ac47a6b45f8c5b01d203ae668
+OpenSSL.v3.5.0+0.i686-w64-mingw32.tar.gz/sha512/d7ea8c94d54a139631f2710cb2c47c0387b694e60dc7afddbca3c6705e17d25ec8958a84b4424edd1ea27d6d1c78457fbacd92f7634345f4ccc1a81cf242c28f
+OpenSSL.v3.5.0+0.powerpc64le-linux-gnu.tar.gz/md5/21674471f2a3352ede9aef3454381edd
+OpenSSL.v3.5.0+0.powerpc64le-linux-gnu.tar.gz/sha512/5615d438db7c3e97dc422b260b3152cd89a2412b7b9b5d7cea36b0ce471fbd3f1a2e8a9d77f399e257f5c38b8b5dfc256acfbdbe2645ba47b89c177dadd066e9
+OpenSSL.v3.5.0+0.riscv64-linux-gnu.tar.gz/md5/7761384fd5991eb56286f24c9a0fbdba
+OpenSSL.v3.5.0+0.riscv64-linux-gnu.tar.gz/sha512/e63d5f7ddc368f4cdb03c299361faef7274930c622404907c3560eb04e6110f851b9a201b402bb6e52fdafe64988f909c209f659f84ba77957eb45a933c8baf1
+OpenSSL.v3.5.0+0.x86_64-apple-darwin.tar.gz/md5/a970728a9aa6f25d56db7e43e7b0cae2
+OpenSSL.v3.5.0+0.x86_64-apple-darwin.tar.gz/sha512/8ab5b2dd90914e193d1f7689c8560228d03cb6ee79fd43a48ae9339b61274fea0557a2bf3a7ae4ce4d4b51630aede55d6d6e860f263e1ffc0bfd6141367a9514
+OpenSSL.v3.5.0+0.x86_64-linux-gnu.tar.gz/md5/4530c0e1791b0eaec99b68f2967a3c2f
+OpenSSL.v3.5.0+0.x86_64-linux-gnu.tar.gz/sha512/ba952738be38f52ebc23f48c52c12c1bec9c8b81416264612da21ca21f23604c8e59bf49f73d4b80256ea17b6b662179620deadb8660be98d8ad5ed57e346394
+OpenSSL.v3.5.0+0.x86_64-linux-musl.tar.gz/md5/eb49cefbb938d80198dbab90e1ad9108
+OpenSSL.v3.5.0+0.x86_64-linux-musl.tar.gz/sha512/f038e9bd950e4472cdd82b0c39aebbfd60e75cdf24fd8408d39e4db0793813c9d30471d1ca8d112b0bb4049f18f8fb36b4c3069dfce61032dc73cb6568852b77
+OpenSSL.v3.5.0+0.x86_64-unknown-freebsd.tar.gz/md5/25023844dae8c7d326620b1f9e730a07
+OpenSSL.v3.5.0+0.x86_64-unknown-freebsd.tar.gz/sha512/e38f1f7c452903a09b3f0127e377d5e46e538903f9a58076e53dfc53883b2423463d3fdcf13dc961516965b6dbc2d289bfbfa1027f8c3110a61bdee060bccf73
+OpenSSL.v3.5.0+0.x86_64-w64-mingw32.tar.gz/md5/a73f5220598dfc5e71e1eee6b26f7a27
+OpenSSL.v3.5.0+0.x86_64-w64-mingw32.tar.gz/sha512/c028527230b6e9e675b7e22a21997e5d032e1099dd1f3437c6e764b7967fd0196d4cb46d66b36f2f6ddeb8200f445aa8d6a7a61f7be61288ee5e0e510b5800f8
+openssl-3.5.0.tar.gz/md5/51da7d2bdf7f4f508cb024f562eb9b03
+openssl-3.5.0.tar.gz/sha512/39cc80e2843a2ee30f3f5de25cd9d0f759ad8de71b0b39f5a679afaaa74f4eb58d285ae50e29e4a27b139b49343ac91d1f05478f96fb0c6b150f16d7b634676f
diff --git a/deps/libgit2.version b/deps/libgit2.version
index 6bfb6106e67d2..3f1f7a66fe972 100644
--- a/deps/libgit2.version
+++ b/deps/libgit2.version
@@ -11,4 +11,4 @@ LIBGIT2_SHA1=338e6fb681369ff0537719095e22ce9dc602dbf0
# The versions of cacert.pem are identified by the date (YYYY-MM-DD) of their changes.
# See https://curl.haxx.se/docs/caextract.html for more details.
# Keep in sync with `stdlib/MozillaCACerts_jll/Project.toml`.
-MOZILLA_CACERT_VERSION := 2024-12-31
+MOZILLA_CACERT_VERSION := 2025-02-25
diff --git a/deps/mmtk_julia.mk b/deps/mmtk_julia.mk
index 424113fd4164c..1dc59749a00b5 100644
--- a/deps/mmtk_julia.mk
+++ b/deps/mmtk_julia.mk
@@ -75,6 +75,25 @@ endif # MMTK_JULIA_DIR
else
# We are building using the BinaryBuilder version of the binding
+# This will download all the versions of the binding that are available in the BinaryBuilder
$(eval $(call bb-install,mmtk_julia,MMTK_JULIA,false))
+# Make sure we use the right version of $MMTK_PLAN, $MMTK_MOVING and $MMTK_BUILD
+ifeq (${MMTK_PLAN},Immix)
+LIB_PATH_PLAN = immix
+else ifeq (${MMTK_PLAN},StickyImmix)
+LIB_PATH_PLAN = sticky
+endif
+
+ifeq ($(MMTK_MOVING), 0)
+LIB_PATH_MOVING := non_moving
+else
+LIB_PATH_MOVING := moving
+endif
+
+version-check-mmtk_julia: $(BUILDROOT)/usr/lib/libmmtk_julia.so
+
+$(BUILDROOT)/usr/lib/libmmtk_julia.so: get-mmtk_julia
+ @ln -sf $(BUILDROOT)/usr/lib/$(LIB_PATH_PLAN)/$(LIB_PATH_MOVING)/$(MMTK_BUILD)/libmmtk_julia.so $@
+
endif # USE_BINARYBUILDER_MMTK_JULIA
diff --git a/deps/mmtk_julia.version b/deps/mmtk_julia.version
index 684197bbe3e4e..530b6d9ed81e1 100644
--- a/deps/mmtk_julia.version
+++ b/deps/mmtk_julia.version
@@ -1,6 +1,6 @@
MMTK_JULIA_BRANCH = master
-MMTK_JULIA_SHA1 = c9e046baf3a0d52fe75d6c8b28f6afd69b045d95
+MMTK_JULIA_SHA1 = 10ad6638b69b31a97a844f2f4e651e5ccea4e298
MMTK_JULIA_GIT_URL := https://github.com/mmtk/mmtk-julia.git
-MMTK_JULIA_TAR_URL = https://github.com/mmtk/mmtk-julia/archive/refs/tags/v0.30.4.tar.gz
-MMTK_JULIA_JLL_VER := 0.30.4+0
+MMTK_JULIA_TAR_URL = https://github.com/mmtk/mmtk-julia/archive/refs/tags/v0.30.5.tar.gz
+MMTK_JULIA_JLL_VER := 0.30.5+1
MMTK_JULIA_JLL_NAME := mmtk_julia
diff --git a/deps/mpfr.version b/deps/mpfr.version
index ec109e181ecdc..70585f95a6385 100644
--- a/deps/mpfr.version
+++ b/deps/mpfr.version
@@ -1,5 +1,6 @@
+# -*- makefile -*-
## jll artifact
MPFR_JLL_NAME := MPFR
## source build
-MPFR_VER := 4.2.1
+MPFR_VER := 4.2.2
diff --git a/deps/openblas.mk b/deps/openblas.mk
index e5a988ba84df2..80881a6a13c4e 100644
--- a/deps/openblas.mk
+++ b/deps/openblas.mk
@@ -44,7 +44,7 @@ OPENBLAS_CFLAGS := -O2
# Decide whether to build for 32-bit or 64-bit arch
ifneq ($(XC_HOST),)
-OPENBLAS_BUILD_OPTS += OSNAME=$(OS) CROSS=1 HOSTCC=$(HOSTCC) CROSS_SUFFIX=$(CROSS_COMPILE)
+OPENBLAS_BUILD_OPTS += OSNAME=$(OS) CROSS=1 HOSTCC="$(HOSTCC)" CROSS_SUFFIX=$(CROSS_COMPILE)
endif
ifeq ($(OS),WINNT)
ifneq ($(ARCH),x86_64)
diff --git a/deps/openssl.version b/deps/openssl.version
index 7253e063167db..49c463aad1565 100644
--- a/deps/openssl.version
+++ b/deps/openssl.version
@@ -3,4 +3,4 @@
OPENSSL_JLL_NAME := OpenSSL
## source build
-OPENSSL_VER := 3.0.15
+OPENSSL_VER := 3.5.0
diff --git a/doc/Manifest.toml b/doc/Manifest.toml
index 481b7240c0caf..05dd8edebe5a6 100644
--- a/doc/Manifest.toml
+++ b/doc/Manifest.toml
@@ -45,9 +45,9 @@ version = "0.9.3"
[[deps.Documenter]]
deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "CodecZlib", "Dates", "DocStringExtensions", "Downloads", "Git", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "Pkg", "PrecompileTools", "REPL", "RegistryInstances", "SHA", "TOML", "Test", "Unicode"]
-git-tree-sha1 = "d0ea2c044963ed6f37703cead7e29f70cba13d7e"
+git-tree-sha1 = "182a9a3fe886587ba230a417f1651a4cbc2b92d4"
uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
-version = "1.8.0"
+version = "1.8.1"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
@@ -117,7 +117,7 @@ version = "0.6.4"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "OpenSSL_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
-version = "8.9.1+0"
+version = "8.11.1+1"
[[deps.LibGit2]]
deps = ["LibGit2_jll", "NetworkOptions", "Printf", "SHA"]
@@ -127,12 +127,12 @@ version = "1.11.0"
[[deps.LibGit2_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "OpenSSL_jll"]
uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5"
-version = "1.8.4+0"
+version = "1.9.0+0"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "OpenSSL_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
-version = "1.11.3+0"
+version = "1.11.3+1"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
@@ -165,21 +165,21 @@ version = "1.11.0"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
-version = "2024.3.11"
+version = "2024.12.31"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
-version = "1.2.0"
+version = "1.3.0"
[[deps.OpenSSL_jll]]
-deps = ["Artifacts", "Libdl", "NetworkOptions"]
+deps = ["Artifacts", "Libdl"]
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
-version = "3.0.15+1"
+version = "3.0.15+2"
[[deps.PCRE2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15"
-version = "10.43.0+1"
+version = "10.44.0+1"
[[deps.Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"]
@@ -277,14 +277,14 @@ version = "1.11.0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
-version = "1.3.1+1"
+version = "1.3.1+2"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
-version = "1.63.0+1"
+version = "1.64.0+1"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
-version = "17.5.0+1"
+version = "17.5.0+2"
diff --git a/doc/make.jl b/doc/make.jl
index 43d51e9936b58..cb091c4bcc247 100644
--- a/doc/make.jl
+++ b/doc/make.jl
@@ -173,6 +173,7 @@ Manual = [
"manual/noteworthy-differences.md",
"manual/unicode-input.md",
"manual/command-line-interface.md",
+ "manual/worldage.md",
]
BaseDocs = [
diff --git a/doc/man/julia.1 b/doc/man/julia.1
index 2da11ae1b3f18..9646464e1e63d 100644
--- a/doc/man/julia.1
+++ b/doc/man/julia.1
@@ -321,7 +321,7 @@ Website: https://julialang.org/
.br
Documentation: https://docs.julialang.org/
.br
-Downloads: https://julialang.org/downloads/
+Install: https://julialang.org/install/
.SH LICENSING
Julia is an open-source project. It is made available under the MIT license.
diff --git a/doc/src/devdocs/ast.md b/doc/src/devdocs/ast.md
index fe63dfe35edac..1330940862b99 100644
--- a/doc/src/devdocs/ast.md
+++ b/doc/src/devdocs/ast.md
@@ -498,9 +498,9 @@ These symbols appear in the `head` field of [`Expr`](@ref)s in lowered form.
The number of required arguments for a varargs function definition.
- * `args[5]::QuoteNode{<:Union{Symbol,Tuple{Symbol,UInt16}}`: calling convention
+ * `args[5]::QuoteNode{<:Union{Symbol,Tuple{Symbol,UInt16},Tuple{Symbol,UInt16,Bool}}}`: calling convention
- The calling convention for the call, optionally with effects.
+ The calling convention for the call, optionally with effects and a `gc_safe` flag (whether it is safe to execute concurrently with the GC).
* `args[6:5+length(args[3])]` : arguments
@@ -661,6 +661,26 @@ for important details on how to modify these fields safely.
If max_world is the special token value `-1`, the value is not yet known.
It may continue to be used until we encounter a backedge that requires us to reconsider.
+ * Timing fields
+
+ - `time_infer_total`: Total cost of computing `inferred` originally as wall-time from start to finish.
+
+ - `time_infer_cache_saved`: The cost saved from `time_infer_total` by having caching.
+ Adding this to `time_infer_total` should give a stable estimate for comparing the cost
+ of two implementations or one implementation over time. This is generally an
+ over-estimate of the time to infer something, since the cache is frequently effective
+ at handling repeated work.
+
+ - `time_infer_self`: Self cost of julia inference for `inferred` (a portion of
+ `time_infer_total`). This is simply the incremental cost of compiling this one method,
+ if given a fully populated cache of all call targets, even including constant
+ inference results and LimitedAccuracy results, which generally are not in a cache.
+
+ - `time_compile`: Self cost of llvm JIT compilation (e.g. of computing `invoke` from
+ `inferred`). A total cost estimate can be computed by walking all of the `edges`
+ contents and summing those, while accounting for cycles and duplicates. (This field
+ currently does not include any measured AOT compile times.)
+
### CodeInfo
diff --git a/doc/src/devdocs/build/windows.md b/doc/src/devdocs/build/windows.md
index ba4af459e24d0..fa7402ff95bb6 100644
--- a/doc/src/devdocs/build/windows.md
+++ b/doc/src/devdocs/build/windows.md
@@ -32,7 +32,7 @@ or edit `%USERPROFILE%\.gitconfig` and add/edit the lines:
## Binary distribution
For the binary distribution installation notes on Windows please see the instructions at
-[https://julialang.org/downloads/platform/#windows](https://julialang.org/downloads/platform/#windows).
+[https://julialang.org/downloads/platform/#windows](https://julialang.org/downloads/platform/#windows). Note, however, that on all platforms [using `juliaup`](https://julialang.org/install/) is recommended over manually installing binaries.
## Source distribution
diff --git a/doc/src/devdocs/init.md b/doc/src/devdocs/init.md
index 1e0e1173f8695..23012d6ba1eb7 100644
--- a/doc/src/devdocs/init.md
+++ b/doc/src/devdocs/init.md
@@ -63,7 +63,7 @@ the [LLVM library](https://llvm.org).
If there is no sysimg file (`!jl_options.image_file`) then the `Core` and `Main` modules are
created and `boot.jl` is evaluated:
-`jl_core_module = jl_new_module(jl_symbol("Core"))` creates the Julia `Core` module.
+`jl_core_module = jl_new_module(jl_symbol("Core"), NULL)` creates the Julia `Core` module.
[`jl_init_intrinsic_functions()`](https://github.com/JuliaLang/julia/blob/master/src/intrinsics.cpp)
creates a new Julia module `Intrinsics` containing constant `jl_intrinsic_type` symbols. These define
diff --git a/doc/src/index.md b/doc/src/index.md
index 8c88af424e8e3..8342ff448625d 100644
--- a/doc/src/index.md
+++ b/doc/src/index.md
@@ -37,7 +37,7 @@ Markdown.parse("""
Below is a non-exhaustive list of links that will be useful as you learn and use the Julia programming language.
- [Julia Homepage](https://julialang.org)
-- [Download Julia](https://julialang.org/downloads/)
+- [Install Julia](https://julialang.org/install/)
- [Discussion forum](https://discourse.julialang.org)
- [Julia YouTube](https://www.youtube.com/user/JuliaLanguage)
- [Find Julia Packages](https://julialang.org/packages/)
diff --git a/doc/src/manual/calling-c-and-fortran-code.md b/doc/src/manual/calling-c-and-fortran-code.md
index d198c796a2e0b..aa317468b0f75 100644
--- a/doc/src/manual/calling-c-and-fortran-code.md
+++ b/doc/src/manual/calling-c-and-fortran-code.md
@@ -547,15 +547,14 @@ is not valid, since the type layout of `T` is not known statically.
### SIMD Values
-Note: This feature is currently implemented on 64-bit x86 and AArch64 platforms only.
-
If a C/C++ routine has an argument or return value that is a native SIMD type, the corresponding
Julia type is a homogeneous tuple of `VecElement` that naturally maps to the SIMD type. Specifically:
-> * The tuple must be the same size as the SIMD type. For example, a tuple representing an `__m128`
-> on x86 must have a size of 16 bytes.
-> * The element type of the tuple must be an instance of `VecElement{T}` where `T` is a primitive type that
-> is 1, 2, 4 or 8 bytes.
+> * The tuple must be the same size and elements as the SIMD type. For example, a tuple
+> representing an `__m128` on x86 must have a size of 16 bytes and Float32 elements.
+> * The element type of the tuple must be an instance of `VecElement{T}` where `T` is a
+> primitive type with a power-of-two number of bytes (e.g. 1, 2, 4, 8, 16, etc) such as
+> Int8 or Float64.
For instance, consider this C routine that uses AVX intrinsics:
@@ -628,6 +627,10 @@ For translating a C argument list to Julia:
* `T`, where `T` is a concrete Julia type
* argument value will be copied (passed by value)
+ * `vector T` (or `__attribute__ vector_size`, or a typedef such as `__m128`)
+
+ * `NTuple{N, VecElement{T}}`, where `T` is a primitive Julia type of the correct size
+ and N is the number of elements in the vector (equal to `vector_size / sizeof T`).
* `void*`
* depends on how this parameter is used, first translate this to the intended pointer type, then
@@ -674,13 +677,16 @@ For translating a C return type to Julia:
* `T`, where `T` is one of the primitive types: `char`, `int`, `long`, `short`, `float`, `double`,
`complex`, `enum` or any of their `typedef` equivalents
- * `T`, where `T` is an equivalent Julia Bits Type (per the table above)
- * if `T` is an `enum`, the argument type should be equivalent to `Cint` or `Cuint`
+ * same as C argument list
* argument value will be copied (returned by-value)
* `struct T` (including typedef to a struct)
- * `T`, where `T` is a concrete Julia Type
+ * same as C argument list
* argument value will be copied (returned by-value)
+
+ * `vector T`
+
+ * same as C argument list
* `void*`
* depends on how this parameter is used, first translate this to the intended pointer type, then
diff --git a/doc/src/manual/embedding.md b/doc/src/manual/embedding.md
index f578e10764101..f14fc1bc3ccda 100644
--- a/doc/src/manual/embedding.md
+++ b/doc/src/manual/embedding.md
@@ -54,7 +54,7 @@ linking against `libjulia`.
The first thing that must be done before calling any other Julia C function is to
initialize Julia. This is done by calling `jl_init`, which tries to automatically determine
Julia's install location. If you need to specify a custom location, or specify which system
-image to load, use `jl_init_with_image` instead.
+image to load, use `jl_init_with_image_file` or `jl_init_with_image_handle` instead.
The second statement in the test program evaluates a Julia statement using a call to `jl_eval_string`.
diff --git a/doc/src/manual/faq.md b/doc/src/manual/faq.md
index be4f331cd6233..188b8b7f79f3a 100644
--- a/doc/src/manual/faq.md
+++ b/doc/src/manual/faq.md
@@ -1090,8 +1090,7 @@ You may wish to test against the nightly version to ensure that such regressions
Finally, you may also consider building Julia from source for yourself. This option is mainly for those individuals who are comfortable at the command line, or interested in learning.
If this describes you, you may also be interested in reading our [guidelines for contributing](https://github.com/JuliaLang/julia/blob/master/CONTRIBUTING.md).
-Links to each of these download types can be found on the download page at [https://julialang.org/downloads/](https://julialang.org/downloads/).
-Note that not all versions of Julia are available for all platforms.
+The [`juliaup` install manager](https://julialang.org/install/) has pre-defined channels named `release` and `lts` for the latest stable release and the current LTS release, as well as version-specific channels.
### How can I transfer the list of installed packages after updating my version of Julia?
diff --git a/doc/src/manual/getting-started.md b/doc/src/manual/getting-started.md
index 2c69aabbda192..b0299a4563f98 100644
--- a/doc/src/manual/getting-started.md
+++ b/doc/src/manual/getting-started.md
@@ -1,7 +1,7 @@
# [Getting Started](@id man-getting-started)
Julia installation is straightforward, whether using precompiled binaries or compiling from source.
-Download and install Julia by following the instructions at [https://julialang.org/downloads/](https://julialang.org/downloads/).
+Download and install Julia by following the instructions at [https://julialang.org/install/](https://julialang.org/install/).
If you are coming to Julia from one of the following languages, then you should start by reading the section on noteworthy differences from [MATLAB](@ref Noteworthy-differences-from-MATLAB), [R](@ref Noteworthy-differences-from-R), [Python](@ref Noteworthy-differences-from-Python), [C/C++](@ref Noteworthy-differences-from-C/C) or [Common Lisp](@ref Noteworthy-differences-from-Common-Lisp). This will help you avoid some common pitfalls since Julia differs from those languages in many subtle ways.
diff --git a/doc/src/manual/integers-and-floating-point-numbers.md b/doc/src/manual/integers-and-floating-point-numbers.md
index 0ee7850c92087..fa0ee228e873b 100644
--- a/doc/src/manual/integers-and-floating-point-numbers.md
+++ b/doc/src/manual/integers-and-floating-point-numbers.md
@@ -334,8 +334,8 @@ julia> typeof(x)
Float64
```
-Half-precision floating-point numbers are also supported ([`Float16`](@ref)), but they are
-implemented in software and use [`Float32`](@ref) for calculations.
+Half-precision floating-point numbers are also supported ([`Float16`](@ref)) on all platforms, with native instructions used on hardware which supports this number format. Otherwise, operations are implemented in software, and use [`Float32`](@ref) for intermediate calculations.
+As an internal implementation detail, this is achieved under the hood by using LLVM's [`half`](https://llvm.org/docs/LangRef.html#half-precision-floating-point-intrinsics) type, which behaves similarly to what the GCC [`-fexcess-precision=16`](https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html#index-fexcess-precision) flag does for C/C++ code.
```jldoctest
julia> sizeof(Float16(4.))
diff --git a/doc/src/manual/methods.md b/doc/src/manual/methods.md
index 45d22e08aaffe..6014b49c346d2 100644
--- a/doc/src/manual/methods.md
+++ b/doc/src/manual/methods.md
@@ -578,73 +578,8 @@ However, future calls to `tryeval` will continue to see the definition of `newfu
You may want to try this for yourself to see how it works.
-The implementation of this behavior is a "world age counter".
-This monotonically increasing value tracks each method definition operation.
-This allows describing "the set of method definitions visible to a given runtime environment"
-as a single number, or "world age".
-It also allows comparing the methods available in two worlds just by comparing their ordinal value.
-In the example above, we see that the "current world" (in which the method `newfun` exists),
-is one greater than the task-local "runtime world" that was fixed when the execution of `tryeval` started.
-
-Sometimes it is necessary to get around this (for example, if you are implementing the above REPL).
-Fortunately, there is an easy solution: call the function using [`Base.invokelatest`](@ref) or
-the macro version [`Base.@invokelatest`](@ref):
-
-```jldoctest
-julia> function tryeval2()
- @eval newfun2() = 2
- @invokelatest newfun2()
- end
-tryeval2 (generic function with 1 method)
-
-julia> tryeval2()
-2
-```
-
-Finally, let's take a look at some more complex examples where this rule comes into play.
-Define a function `f(x)`, which initially has one method:
-
-```jldoctest redefinemethod
-julia> f(x) = "original definition"
-f (generic function with 1 method)
-```
-
-Start some other operations that use `f(x)`:
-
-```jldoctest redefinemethod
-julia> g(x) = f(x)
-g (generic function with 1 method)
-
-julia> t = @async f(wait()); yield();
-```
-
-Now we add some new methods to `f(x)`:
-
-```jldoctest redefinemethod
-julia> f(x::Int) = "definition for Int"
-f (generic function with 2 methods)
-
-julia> f(x::Type{Int}) = "definition for Type{Int}"
-f (generic function with 3 methods)
-```
-
-Compare how these results differ:
-
-```jldoctest redefinemethod
-julia> f(1)
-"definition for Int"
-
-julia> g(1)
-"definition for Int"
-
-julia> fetch(schedule(t, 1))
-"original definition"
-
-julia> t = @async f(wait()); yield();
-
-julia> fetch(schedule(t, 1))
-"definition for Int"
-```
+The implementation of this behavior is a "world age counter", which is further described in the [world age](@ref man-worldage)
+manual chapter.
## Design Patterns with Parametric Methods
diff --git a/doc/src/manual/modules.md b/doc/src/manual/modules.md
index cf24474916bef..23974ae7ecce1 100644
--- a/doc/src/manual/modules.md
+++ b/doc/src/manual/modules.md
@@ -192,8 +192,6 @@ julia> nice(::Cat) = "nice 😸"
ERROR: invalid method definition in Main: function NiceStuff.nice must be explicitly imported to be extended
Stacktrace:
[1] top-level scope
- @ none:0
- [2] top-level scope
@ none:1
```
@@ -318,6 +316,68 @@ Here, Julia cannot decide which `f` you are referring to, so you have to make a
3. When the names in question *do* share a meaning, it is common for one module to import it from another, or have a lightweight “base” package with the sole function of defining an interface like this, which can be used by other packages. It is conventional to have such package names end in `...Base` (which has nothing to do with Julia's `Base` module).
+### Precedence order of definitions
+
+There are in general four kinds of binding definitions:
+ 1. Those provided via implicit import through `using M`
+ 2. Those provided via explicit import (e.g. `using M: x`, `import M: x`)
+ 3. Those declared implicitly as global (via `global x` without type specification)
+ 4. Those declared explicitly using definition syntax (`const`, `global x::T`, `struct`, etc.)
+
+Syntactically, we divide these into three precedence levels (from weakest to strongest)
+ 1. Implicit imports
+ 2. Implicit declarations
+ 3. Explicit declarations and imports
+
+In general, we permit replacement of weaker bindings by stronger ones:
+
+```julia-repl
+julia> module M1; const x = 1; export x; end
+Main.M1
+
+julia> using .M1
+
+julia> x # Implicit import from M1
+1
+
+julia> begin; f() = (global x; x = 1) end
+
+julia> x # Implicit declaration
+ERROR: UndefVarError: `x` not defined in `Main`
+Suggestion: add an appropriate import or assignment. This global was declared but not assigned.
+
+julia> const x = 2 # Explicit declaration
+2
+```
+
+However, within the explicit precedence level, replacement is syntactically disallowed:
+```julia-repl
+julia> module M1; const x = 1; export x; end
+Main.M1
+
+julia> import .M1: x
+
+julia> const x = 2
+ERROR: cannot declare Main.x constant; it was already declared as an import
+Stacktrace:
+ [1] top-level scope
+ @ REPL[3]:1
+```
+
+or ignored:
+
+```julia-repl
+julia> const y = 2
+2
+
+julia> import .M1: x as y
+WARNING: import of M1.x into Main conflicts with an existing identifier; ignored.
+```
+
+The resolution of an implicit binding depends on the set of all `using`'d modules visible
+in the current world age. See [the manual chapter on world age](@ref man-worldage) for more
+details.
+
### Default top-level definitions and bare modules
Modules automatically contain `using Core`, `using Base`, and definitions of the [`eval`](@ref)
@@ -373,7 +433,7 @@ There are three important standard modules:
Modules can contain *submodules*, nesting the same syntax `module ... end`. They can be used to introduce separate namespaces, which can be helpful for organizing complex codebases. Note that each `module` introduces its own [scope](@ref scope-of-variables), so submodules do not automatically “inherit” names from their parent.
-It is recommended that submodules refer to other modules within the enclosing parent module (including the latter) using *relative module qualifiers* in `using` and `import` statements. A relative module qualifier starts with a period (`.`), which corresponds to the current module, and each successive `.` leads to the parent of the current module. This should be followed by modules if necessary, and eventually the actual name to access, all separated by `.`s.
+It is recommended that submodules refer to other modules within the enclosing parent module (including the latter) using *relative module qualifiers* in `using` and `import` statements. A relative module qualifier starts with a period (`.`), which corresponds to the current module, and each successive `.` leads to the parent of the current module. This should be followed by modules if necessary, and eventually the actual name to access, all separated by `.`s. As a special case, however, referring to the module root can be written without `.`, avoiding the need to count the depth to reach that module.
Consider the following example, where the submodule `SubA` defines a function, which is then extended in its “sibling” module:
@@ -388,6 +448,7 @@ julia> module ParentModule
export add_D # export it from ParentModule too
module SubB
import ..SubA: add_D # relative path for a “sibling” module
+ # import ParentModule.SubA: add_D # when in a package, such as when this is loaded by using or import, this would be equivalent to the previous import, but not at the REPL
struct Infinity end
add_D(x::Infinity) = x
end
@@ -395,12 +456,16 @@ julia> module ParentModule
```
-You may see code in packages, which, in a similar situation, uses
+You may see code in packages which, in a similar situation, uses `import` without the leading `.`:
+```jldoctest
+julia> import ParentModule.SubA: add_D
+ERROR: ArgumentError: Package ParentModule not found in current path.
+```
+However, since this operates through [code loading](@ref code-loading), it only works if `ParentModule` is in a package in a file. If `ParentModule` was defined at the REPL, it is necessary to use relative paths:
```jldoctest module_manual
julia> import .ParentModule.SubA: add_D
```
-However, this operates through [code loading](@ref code-loading), and thus only works if `ParentModule` is in a package. It is better to use relative paths.
Note that the order of definitions also matters if you are evaluating values. Consider
@@ -493,8 +558,12 @@ In particular, if you define a `function __init__()` in a module, then Julia wil
immediately *after* the module is loaded (e.g., by `import`, `using`, or `require`) at runtime
for the *first* time (i.e., `__init__` is only called once, and only after all statements in the
module have been executed). Because it is called after the module is fully imported, any submodules
-or other imported modules have their `__init__` functions called *before* the `__init__` of the
-enclosing module.
+or other imported modules have their `__init__` functions called *before* the `__init__` of
+the enclosing module. This is also synchronized across threads, so that code can safely rely upon
+this ordering of effects, such that all `__init__` methods will have run, in dependency ordering,
+before the `using` result is completed. They may run concurrently with other `__init__`
+methods which are not dependencies however, so be careful when accessing any shared state
+outside the current module to use locks when needed.
Two typical uses of `__init__` are calling runtime initialization functions of external C libraries
and initializing global constants that involve pointers returned by external libraries. For example,
@@ -526,17 +595,6 @@ pointer value must be called at runtime for precompilation to work ([`Ptr`](@ref
null pointers unless they are hidden inside an [`isbits`](@ref) object). This includes the return values
of the Julia functions [`@cfunction`](@ref) and [`pointer`](@ref).
-Dictionary and set types, or in general anything that depends on the output of a `hash(key)` method,
-are a trickier case. In the common case where the keys are numbers, strings, symbols, ranges,
-`Expr`, or compositions of these types (via arrays, tuples, sets, pairs, etc.) they are safe to
-precompile. However, for a few other key types, such as `Function` or `DataType` and generic
-user-defined types where you haven't defined a `hash` method, the fallback `hash` method depends
-on the memory address of the object (via its `objectid`) and hence may change from run to run.
-If you have one of these key types, or if you aren't sure, to be safe you can initialize this
-dictionary from within your `__init__` function. Alternatively, you can use the [`IdDict`](@ref)
-dictionary type, which is specially handled by precompilation so that it is safe to initialize
-at compile-time.
-
When using precompilation, it is important to keep a clear sense of the distinction between the
compilation phase and the execution phase. In this mode, it will often be much more clearly apparent
that Julia is a compiler which allows execution of arbitrary Julia code, not a standalone interpreter
diff --git a/doc/src/manual/variables-and-scoping.md b/doc/src/manual/variables-and-scoping.md
index 99f7ba088311d..ab2d969dd9b0e 100644
--- a/doc/src/manual/variables-and-scoping.md
+++ b/doc/src/manual/variables-and-scoping.md
@@ -730,65 +730,28 @@ Note that `const` only affects the variable binding; the variable may be bound t
object (such as an array), and that object may still be modified. Additionally when one tries
to assign a value to a variable that is declared constant the following scenarios are possible:
-* if a new value has a different type than the type of the constant then an error is thrown:
+* Attempting to replace a constant without the `const` keyword is disallowed:
```jldoctest
julia> const x = 1.0
1.0
julia> x = 1
-ERROR: invalid redefinition of constant x
+ERROR: invalid assignment to constant x. This redefinition may be permitted using the `const` keyword.
```
-* if a new value has the same type as the constant then a warning is printed:
+* All other redefinitions of constants are permitted, but may cause significant recompilation:
```jldoctest
julia> const y = 1.0
1.0
julia> const y = 2.0
-WARNING: redefinition of constant y. This may fail, cause incorrect answers, or produce other errors.
2.0
```
-* if an assignment would not result in the change of variable value no message is given:
-```jldoctest
-julia> const z = 100
-100
-
-julia> z = 100
-100
-```
-* if an assignment would change the mutable object to which the variable points (regardless of whether those two objects are deeply equal), a warning is printed:
-```jldoctest
-julia> const a = [1]
-1-element Vector{Int64}:
- 1
-
-julia> const a = [1]
-WARNING: redefinition of constant a. This may fail, cause incorrect answers, or produce other errors.
-1-element Vector{Int64}:
- 1
-```
-Note that although sometimes possible, changing the value of a `const` variable is strongly
-discouraged, and is intended only for convenience during interactive use. Changing constants can
-cause various problems or unexpected behaviors. For instance, if a method references a constant and
-is already compiled before the constant is changed, then it might keep using the old value:
-
-```jldoctest
-julia> const x = 1
-1
-
-julia> f() = x
-f (generic function with 1 method)
-
-julia> f()
-1
-
-julia> const x = 2
-WARNING: redefinition of constant x. This may fail, cause incorrect answers, or produce other errors.
-2
-
-julia> f()
-1
-```
+!!! compat "Julia 1.12"
+    Prior to Julia 1.12, redefinition of constants was poorly supported. It was restricted to
+    redefinition of constants of the same type and could lead to observably incorrect behavior
+    or crashes. Constant redefinition is highly discouraged in versions of Julia prior to 1.12.
+    See the manual for prior Julia versions for further information.
## [Typed Globals](@id man-typed-globals)
diff --git a/doc/src/manual/variables.md b/doc/src/manual/variables.md
index ad2c60a029032..4c3e98ca57281 100644
--- a/doc/src/manual/variables.md
+++ b/doc/src/manual/variables.md
@@ -79,10 +79,12 @@ julia> Base.length
length (generic function with 79 methods)
```
-However, if you try to redefine a built-in constant or function already in use, Julia will give
-you an error:
+However, if you try to redefine a built-in constant or function that you
+have explicitly imported, Julia will give you an error:
```jldoctest
+julia> using Base: pi, sqrt
+
julia> pi
π = 3.1415926535897...
@@ -96,6 +98,10 @@ julia> sqrt = 4
ERROR: cannot assign a value to imported variable Base.sqrt from module Main
```
+!!! compat "Julia 1.12"
+ Note that in versions prior to Julia 1.12, these errors depended on *use* rather than definition of
+ the conflicting binding.
+
## [Allowed Variable Names](@id man-allowed-variable-names)
Variable names must begin with a letter (A-Z or a-z), underscore, or a subset of Unicode code
diff --git a/doc/src/manual/worldage.md b/doc/src/manual/worldage.md
new file mode 100644
index 0000000000000..26853b84b3031
--- /dev/null
+++ b/doc/src/manual/worldage.md
@@ -0,0 +1,295 @@
+# The World Age mechanism
+
+!!! note
+ World age is an advanced concept. For the vast majority of Julia users, the world age
+ mechanism operates invisibly in the background. This documentation is intended for the
+ few users who may encounter world-age related issues or error messages.
+
+!!! compat "Julia 1.12"
+ Prior to Julia 1.12, the world age mechanism did not apply to changes to the global binding table.
+ The documentation in this chapter is specific to Julia 1.12+.
+
+!!! warning
+ This manual chapter uses internal functions to introspect world age and runtime data structures
+ as an explanatory aid. In general, unless otherwise noted the world age mechanism is not a stable
+ interface and should be interacted with in packages through stable APIs (e.g. `invokelatest`) only.
+ In particular, do not assume that world ages are always integers or that they have a linear order.
+
+## World age in general
+
+The "world age counter" is a monotonically increasing counter that is incremented for every
+change to the global method table or the global binding table (e.g. through method definition,
+type definition, `import`/`using` declaration, creation of (typed) globals or definition of constants).
+
+The current value of the global world age counter can be retrieved using the (internal) function [`Base.get_world_counter`](@ref).
+
+```julia-repl
+julia> Base.get_world_counter()
+0x0000000000009632
+
+julia> const x = 1
+
+julia> Base.get_world_counter()
+0x0000000000009633
+```
+
+In addition, each [`Task`](@ref) stores a local world age that determines which modifications to
+the global binding and method tables are currently visible to the running task. The world age of
+the running task will never exceed the global world age counter, but may run arbitrarily behind it.
+In general the term "current world age" refers to the local world age of the currently running task.
+The current world age may be retrieved using the (internal) function [`Base.tls_world_age`](@ref)
+
+```julia-repl
+julia> function f end
+f (generic function with 0 methods)
+
+julia> begin
+ @show (Int(Base.get_world_counter()), Int(Base.tls_world_age()))
+ Core.eval(@__MODULE__, :(f() = 1))
+ @show (Int(Base.get_world_counter()), Int(Base.tls_world_age()))
+ f()
+ end
+(Int(Base.get_world_counter()), Int(Base.tls_world_age())) = (38452, 38452)
+(Int(Base.get_world_counter()), Int(Base.tls_world_age())) = (38453, 38452)
+ERROR: MethodError: no method matching f()
+The applicable method may be too new: running in current world age 38452, while global world is 38453.
+
+Closest candidates are:
+ f() (method too new to be called from this world context.)
+ @ Main REPL[2]:3
+
+Stacktrace:
+ [1] top-level scope
+ @ REPL[2]:5
+
+julia> (f(), Int(Base.tls_world_age()))
+(1, 38453)
+```
+
+Here the definition of the method `f` raised the global world counter, but the current world
+age did not change. As a result, the definition of `f` was not visible in the currently
+executing task and a [`MethodError`](@ref) resulted.
+
+!!! note
+ The method error printing provided additional information that `f()` is available in a newer world age.
+ This information is added by the error display, not the task that threw the `MethodError`.
+ The thrown `MethodError` is identical whether or not a matching definition of `f()` exists
+ in a newer world age.
+
+However, note that the definition of `f()` was subsequently available at the next REPL prompt, because
+the current task's world age had been raised. In general, certain syntactic constructs (in particular most definitions)
+will raise the current task's world age to the latest global world age, thus making all changes
+(both from the current task and any concurrently executing other tasks) visible. The following statements
+raise the current world age:
+
+1. An explicit invocation of `Core.@latestworld`
+2. The start of every top-level statement
+3. The start of every REPL prompt
+4. Any type or struct definition
+5. Any method definition
+6. Any constant declaration
+7. Any global variable declaration (but not a global variable assignment)
+8. Any `using`, `import`, `export` or `public` statement
+9. Certain other macros like [`@eval`](@ref) (depends on the macro implementation)
+
+Note, however, that the current task's world age may only ever be permanently incremented at
+top level. As a general rule, using any of the above statements in non-top-level scope is a syntax error:
+
+```julia-repl
+julia> f() = Core.@latestworld
+ERROR: syntax: World age increment not at top level
+Stacktrace:
+ [1] top-level scope
+ @ REPL[5]:1
+```
+
+When such a statement is not at top level (as is possible, for example, with `@eval`), the world age side effect is ignored.
+
+As a result of these rules, Julia may assume that the world age does not change
+within the execution of an ordinary function.
+
+```julia
+function my_function()
+ before = Base.tls_world_age()
+ # Any arbitrary code
+ after = Base.tls_world_age()
+ @assert before === after # always true
+end
+```
+
+This is the key invariant that allows Julia to optimize based on the current state
+of its global data structures, while still having the well-defined ability to change
+these data structures.
+
+## Temporarily raising the world age using `invokelatest`
+
+As described above, it is not possible to permanently raise the world age for the remainder of
+a `Task`'s execution unless the task is executing top-level statements. However, it is possible to
+temporarily change the world age in a scoped manner using `invokelatest`:
+
+```jldoctest
+julia> function f end
+f (generic function with 0 methods)
+
+julia> begin
+ Core.eval(@__MODULE__, :(f() = 1))
+ invokelatest(f)
+ end
+1
+```
+
+`invokelatest` will temporarily raise the current task's world age to the latest global world age (at
+entry to `invokelatest`) and execute the provided function. Note that the world age will return
+to its prior value upon exit from `invokelatest`.
+
+## World age and const struct redefinitions
+
+The semantics described above for method redefinition also apply to redefinition of constants:
+
+```jldoctest
+julia> const x = 1
+1
+
+julia> get_const() = x
+get_const (generic function with 1 method)
+
+julia> begin
+ @show get_const()
+ Core.eval(@__MODULE__, :(const x = 2))
+ @show get_const()
+ Core.@latestworld
+ @show get_const()
+ end
+get_const() = 1
+get_const() = 1
+get_const() = 2
+2
+```
+
+However, for the avoidance of doubt, they do not apply to ordinary assignment to global variables, which becomes visible immediately:
+```jldoctest
+julia> global y = 1
+1
+
+julia> get_global() = y
+get_global (generic function with 1 method)
+
+julia> begin
+ @show get_global()
+ Core.eval(@__MODULE__, :(y = 2))
+ @show get_global()
+ end
+get_global() = 1
+get_global() = 2
+2
+```
+
+One particular special case of constant reassignment is the redefinition of struct types:
+
+```jldoctest; filter = r"\@world\(MyStruct, \d+\:\d+\)"
+julia> struct MyStruct
+ x::Int
+ end
+
+julia> const one_field = MyStruct(1)
+MyStruct(1)
+
+julia> struct MyStruct
+ x::Int
+ y::Float64
+ end
+
+julia> const two_field = MyStruct(1, 2.0)
+MyStruct(1, 2.0)
+
+julia> one_field
+@world(MyStruct, 38452:38455)(1)
+
+julia> two_field
+MyStruct(1, 2.0)
+```
+
+Internally the two definitions of `MyStruct` are entirely separate types. However,
+after the new `MyStruct` type is defined, there is no longer any default binding
+for the original definition of `MyStruct`. To nevertheless facilitate access to
+these types, the special [`@world`](@ref) macro may be used to access the meaning
+of a name in a previous world. However, this facility is intended for introspection
+only and in particular note that world age numbers are not stable across precompilation
+and should in general be treated opaquely.
+
+### Binding partition introspection
+
+In certain cases, it can be helpful to introspect the system's understanding of what
+a binding means in any particular world age. The default display printing of `Core.Binding`
+provides a helpful summary (e.g. on the `MyStruct` example from above):
+
+```julia-repl
+julia> convert(Core.Binding, GlobalRef(@__MODULE__, :MyStruct))
+Binding Main.MyStruct
+ 38456:∞ - constant binding to MyStruct
+ 38452:38455 - constant binding to @world(MyStruct, 38452:38455)
+ 38451:38451 - backdated constant binding to @world(MyStruct, 38452:38455)
+ 0:38450 - backdated constant binding to @world(MyStruct, 38452:38455)
+```
+
+## World age and `using`/`import`
+
+Bindings provided via `using` and `import` also operate via the world age mechanism.
+Binding resolution is a stateless function of the `import` and `using` definitions
+visible in the current world age. For example:
+
+```julia-repl
+julia> module M1; const x = 1; export x; end
+
+julia> module M2; const x = 2; export x; end
+
+julia> using .M1
+
+julia> x
+1
+
+julia> using .M2
+
+julia> x
+ERROR: UndefVarError: `x` not defined in `Main`
+Hint: It looks like two or more modules export different bindings with this name, resulting in ambiguity. Try explicitly importing it from a particular module, or qualifying the name with the module it should come from.
+
+julia> convert(Core.Binding, GlobalRef(@__MODULE__, :x))
+Binding Main.x
+ 38458:∞ - ambiguous binding - guard entry
+ 38457:38457 - implicit `using` resolved to constant 1
+```
+
+## World age capture
+
+Certain language features capture the current task's world age. Perhaps the most common of
+these is creation of new tasks. Newly created tasks will inherit the creating task's local
+world age at creation time and will retain said world age (unless explicitly raised) even
+if the originating task raises its world age:
+
+```julia-repl
+julia> const x = 1
+
+julia> t = @task (wait(); println("Running now"); x);
+
+julia> const x = 2
+
+julia> schedule(t);
+Running now
+
+julia> x
+2
+
+julia> fetch(t)
+1
+```
+
+In addition to tasks, opaque closures also capture their world age at creation. See [`Base.Experimental.@opaque`](@ref).
+
+```@docs
+Base.@world
+Base.get_world_counter
+Base.tls_world_age
+Base.invoke_in_world
+Base.Experimental.@opaque
+```
diff --git a/src/APInt-C.cpp b/src/APInt-C.cpp
index 86b0bdb27638b..64b87a1096d44 100644
--- a/src/APInt-C.cpp
+++ b/src/APInt-C.cpp
@@ -321,7 +321,7 @@ void LLVMFPtoInt(jl_datatype_t *ty, void *pa, jl_datatype_t *oty, integerPart *p
Val = julia_bfloat_to_float(*(uint16_t*)pa);
else if (ty == jl_float32_type)
Val = *(float*)pa;
- else if (jl_float64_type)
+ else if (ty == jl_float64_type)
Val = *(double*)pa;
else
jl_error("FPtoSI: runtime floating point intrinsics are not implemented for bit sizes other than 16, 32 and 64");
@@ -352,7 +352,7 @@ void LLVMFPtoInt(jl_datatype_t *ty, void *pa, jl_datatype_t *oty, integerPart *p
else {
APFloat a(Val);
bool isVeryExact;
- APFloat::roundingMode rounding_mode = APFloat::rmNearestTiesToEven;
+ APFloat::roundingMode rounding_mode = RoundingMode::TowardZero;
unsigned nbytes = alignTo(onumbits, integerPartWidth) / host_char_bit;
integerPart *parts = (integerPart*)alloca(nbytes);
APFloat::opStatus status = a.convertToInteger(MutableArrayRef(parts, nbytes), onumbits, isSigned, rounding_mode, &isVeryExact);
diff --git a/src/Makefile b/src/Makefile
index b49d27e05ff28..e859acc765354 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -56,7 +56,7 @@ SRCS := \
jltypes gf typemap smallintset ast builtins module interpreter symbol \
dlload sys init task array genericmemory staticdata toplevel jl_uv datatype \
simplevector runtime_intrinsics precompile jloptions mtarraylist \
- threading scheduler stackwalk \
+ threading scheduler stackwalk null_sysimage \
method jlapi signal-handling safepoint timing subtype rtutils \
crc32c APInt-C processor ircode opaque_closure codegen-stubs coverage runtime_ccall engine \
$(GC_SRCS)
@@ -75,7 +75,7 @@ GC_CODEGEN_SRCS += llvm-late-gc-lowering-stock
endif
CODEGEN_SRCS := codegen jitlayers aotcompile debuginfo disasm llvm-simdloop \
llvm-pass-helpers llvm-ptls \
- llvm-lower-handlers llvm-propagate-addrspaces \
+ llvm-lower-handlers llvm-propagate-addrspaces null_sysimage \
llvm-multiversioning llvm-alloc-opt llvm-alloc-helpers cgmemmgr llvm-remove-addrspaces \
llvm-remove-ni llvm-julia-licm llvm-demote-float16 llvm-cpufeatures pipeline llvm_api \
$(GC_CODEGEN_SRCS)
@@ -184,7 +184,7 @@ endif
CLANG_LDFLAGS := $(LLVM_LDFLAGS)
ifeq ($(OS), Darwin)
CLANG_LDFLAGS += -Wl,-undefined,dynamic_lookup
-OSLIBS += -Wl,-U,__dyld_atfork_parent -Wl,-U,__dyld_atfork_prepare -Wl,-U,__dyld_dlopen_atfork_parent -Wl,-U,__dyld_dlopen_atfork_prepare -Wl,-U,_jl_image_pointers -Wl,-U,_jl_system_image_data -Wl,-U,_jl_system_image_size
+OSLIBS += -Wl,-U,__dyld_atfork_parent -Wl,-U,__dyld_atfork_prepare -Wl,-U,__dyld_dlopen_atfork_parent -Wl,-U,__dyld_dlopen_atfork_prepare
LIBJULIA_PATH_REL := @rpath/libjulia
else
LIBJULIA_PATH_REL := libjulia
@@ -205,10 +205,10 @@ CODEGEN_OBJS := $(CODEGEN_SRCS:%=$(BUILDDIR)/%.o)
CODEGEN_DOBJS := $(CODEGEN_SRCS:%=$(BUILDDIR)/%.dbg.obj)
# Add SONAME defines so we can embed proper `dlopen()` calls.
-ADDL_SHIPFLAGS := "-DJL_SYSTEM_IMAGE_PATH=\"$(build_private_libdir_rel)/sys.$(SHLIB_EXT)\"" \
- "-DJL_LIBJULIA_SONAME=\"$(LIBJULIA_PATH_REL).$(JL_MAJOR_SHLIB_EXT)\""
-ADDL_DEBUGFLAGS := "-DJL_SYSTEM_IMAGE_PATH=\"$(build_private_libdir_rel)/sys-debug.$(SHLIB_EXT)\"" \
- "-DJL_LIBJULIA_SONAME=\"$(LIBJULIA_PATH_REL)-debug.$(JL_MAJOR_SHLIB_EXT)\""
+ADDL_SHIPFLAGS := -DJL_SYSTEM_IMAGE_PATH=$(call shell_escape,$(call c_escape,$(call normalize_path,$(build_private_libdir_rel)/sys.$(SHLIB_EXT)))) \
+ -DJL_LIBJULIA_SONAME=$(call shell_escape,$(call c_escape,$(LIBJULIA_PATH_REL).$(JL_MAJOR_SHLIB_EXT)))
+ADDL_DEBUGFLAGS := -DJL_SYSTEM_IMAGE_PATH=$(call shell_escape,$(call c_escape,$(call normalize_path,$(build_private_libdir_rel)/sys-debug.$(SHLIB_EXT)))) \
+ -DJL_LIBJULIA_SONAME=$(call shell_escape,$(call c_escape,$(LIBJULIA_PATH_REL)-debug.$(JL_MAJOR_SHLIB_EXT)))
SHIPFLAGS += $(FLAGS) $(ADDL_SHIPFLAGS)
DEBUGFLAGS += $(FLAGS) $(ADDL_DEBUGFLAGS)
diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp
index 5524518da46fa..e3c52e4796788 100644
--- a/src/aotcompile.cpp
+++ b/src/aotcompile.cpp
@@ -423,28 +423,24 @@ static void resolve_workqueue(jl_codegen_params_t ¶ms, egal_set &method_root
if (decls.functionObject == "jl_fptr_args") {
preal_decl = decls.specFunctionObject;
}
- else if (decls.functionObject != "jl_fptr_sparam" && decls.functionObject != "jl_f_opaque_closure_call") {
+ else if (decls.functionObject != "jl_fptr_sparam" && decls.functionObject != "jl_f_opaque_closure_call" && decls.functionObject != "jl_fptr_const_return") {
preal_decl = decls.specFunctionObject;
preal_specsig = true;
}
}
- else if (params.params->trim) {
- jl_safe_printf("warning: no code provided for function ");
- jl_(codeinst->def);
- if (params.params->trim)
- abort();
- }
}
// patch up the prototype we emitted earlier
Module *mod = proto.decl->getParent();
assert(proto.decl->isDeclaration());
Function *pinvoke = nullptr;
+ if (preal_decl.empty() && jl_atomic_load_relaxed(&codeinst->invoke) == jl_fptr_const_return_addr) {
+ std::string gf_thunk_name = emit_abi_constreturn(mod, params, proto.specsig, codeinst);
+ preal_specsig = proto.specsig;
+ if (invokeName.empty())
+ invokeName = "jl_fptr_const_return";
+ preal_decl = mod->getNamedValue(gf_thunk_name)->getName();
+ }
if (preal_decl.empty()) {
- if (invokeName.empty() && params.params->trim) {
- jl_safe_printf("warning: bailed out to invoke when compiling: ");
- jl_(codeinst->def);
- abort();
- }
pinvoke = emit_tojlinvoke(codeinst, invokeName, mod, params);
if (!proto.specsig)
proto.decl->replaceAllUsesWith(pinvoke);
@@ -483,6 +479,7 @@ static void resolve_workqueue(jl_codegen_params_t ¶ms, egal_set &method_root
ocinvokeDecl = pinvoke->getName();
assert(!ocinvokeDecl.empty());
assert(ocinvokeDecl != "jl_fptr_args");
+ assert(ocinvokeDecl != "jl_fptr_const_return");
assert(ocinvokeDecl != "jl_fptr_sparam");
// merge and/or rename this prototype to the real function
if (Value *specfun = mod->getNamedValue(ocinvokeDecl)) {
@@ -499,12 +496,140 @@ static void resolve_workqueue(jl_codegen_params_t &params, egal_set &method_root
JL_GC_POP();
}
+/// Link the function in the source module into the destination module if
+/// needed, setting up mapping information.
+/// Similar to orc::cloneFunctionDecl, but more complete for greater correctness
+Function *IRLinker_copyFunctionProto(Module *DstM, Function *SF) {
+ // If there is no linkage to be performed or we are linking from the source,
+ // bring SF over, if we haven't already.
+ if (SF->getParent() == DstM)
+ return SF;
+ if (auto *F = DstM->getNamedValue(SF->getName()))
+ return cast<Function>(F);
+ auto *F = Function::Create(SF->getFunctionType(), SF->getLinkage(),
+ SF->getAddressSpace(), SF->getName(), DstM);
+ F->copyAttributesFrom(SF);
+ F->IsNewDbgInfoFormat = SF->IsNewDbgInfoFormat;
+
+ // Remove these copied constants since they point to the source module.
+ F->setPersonalityFn(nullptr);
+ F->setPrefixData(nullptr);
+ F->setPrologueData(nullptr);
+ return F;
+}
+
+static Function *aot_abi_converter(jl_codegen_params_t &params, Module *M, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, bool specsig, jl_code_instance_t *codeinst, Module *defM, StringRef func, StringRef specfunc, bool target_specsig)
+{
+ std::string gf_thunk_name;
+ if (!specfunc.empty()) {
+ Value *llvmtarget = IRLinker_copyFunctionProto(M, defM->getFunction(specfunc));
+ gf_thunk_name = emit_abi_converter(M, params, declrt, sigt, nargs, specsig, codeinst, llvmtarget, target_specsig);
+ }
+ else {
+ Value *llvmtarget = func.empty() ? nullptr : IRLinker_copyFunctionProto(M, defM->getFunction(func));
+ gf_thunk_name = emit_abi_dispatcher(M, params, declrt, sigt, nargs, specsig, codeinst, llvmtarget);
+ }
+ auto F = M->getFunction(gf_thunk_name);
+ assert(F);
+ return F;
+}
+
+static void generate_cfunc_thunks(jl_codegen_params_t &params, jl_compiled_functions_t &compiled_functions)
+{
+ DenseMap<jl_method_instance_t *, jl_code_instance_t *> compiled_mi;
+ for (auto &def : compiled_functions) {
+ jl_code_instance_t *this_code = def.first;
+ jl_method_instance_t *mi = jl_get_ci_mi(this_code);
+ if (this_code->owner == jl_nothing && jl_atomic_load_relaxed(&this_code->max_world) == ~(size_t)0 && this_code->def == (jl_value_t*)mi)
+ compiled_mi[mi] = this_code;
+ }
+ size_t latestworld = jl_atomic_load_acquire(&jl_world_counter);
+ for (cfunc_decl_t &cfunc : params.cfuncs) {
+ Module *M = cfunc.theFptr->getParent();
+ jl_value_t *sigt = cfunc.sigt;
+ JL_GC_PROMISE_ROOTED(sigt);
+ jl_value_t *declrt = cfunc.declrt;
+ JL_GC_PROMISE_ROOTED(declrt);
+ Function *unspec = aot_abi_converter(params, M, declrt, sigt, cfunc.nargs, cfunc.specsig, nullptr, nullptr, "", "", false);
+ jl_code_instance_t *codeinst = nullptr;
+ auto assign_fptr = [&params, &cfunc, &codeinst, &unspec](Function *f) {
+ ConstantArray *init = cast<ConstantArray>(cfunc.cfuncdata->getInitializer());
+ SmallVector<Constant *, 6> initvals;
+ for (unsigned i = 0; i < init->getNumOperands(); ++i)
+ initvals.push_back(init->getOperand(i));
+ assert(initvals.size() == 6);
+ assert(initvals[0]->isNullValue());
+ if (codeinst) {
+ Constant *llvmcodeinst = literal_pointer_val_slot(params, f->getParent(), (jl_value_t*)codeinst);
+ initvals[0] = llvmcodeinst; // plast_codeinst
+ }
+ assert(initvals[2]->isNullValue());
+ initvals[2] = unspec;
+ cfunc.cfuncdata->setInitializer(ConstantArray::get(init->getType(), initvals));
+ cfunc.theFptr->setInitializer(f);
+ };
+ Module *defM = nullptr;
+ StringRef func;
+ jl_method_instance_t *mi = jl_get_specialization1((jl_tupletype_t*)sigt, latestworld, 0);
+ if (mi) {
+ auto it = compiled_mi.find(mi);
+ if (it != compiled_mi.end()) {
+ codeinst = it->second;
+ JL_GC_PROMISE_ROOTED(codeinst);
+ auto defs = compiled_functions.find(codeinst);
+ defM = std::get<0>(defs->second).getModuleUnlocked();
+ const jl_llvm_functions_t &decls = std::get<1>(defs->second);
+ func = decls.functionObject;
+ StringRef specfunc = decls.specFunctionObject;
+ jl_value_t *astrt = codeinst->rettype;
+ if (astrt != (jl_value_t*)jl_bottom_type &&
+ jl_type_intersection(astrt, declrt) == jl_bottom_type) {
+ // Do not warn if the function never returns since it is
+ // occasionally required by the C API (typically error callbacks)
+ // even though we're likely to encounter memory errors in that case
+ jl_printf(JL_STDERR, "WARNING: cfunction: return type of %s does not match\n", name_from_method_instance(mi));
+ }
+ if (func == "jl_fptr_const_return") {
+ std::string gf_thunk_name = emit_abi_constreturn(M, params, declrt, sigt, cfunc.nargs, cfunc.specsig, codeinst->rettype_const);
+ auto F = M->getFunction(gf_thunk_name);
+ assert(F);
+ assign_fptr(F);
+ continue;
+ }
+ else if (func == "jl_fptr_args") {
+ assert(!specfunc.empty());
+ if (!cfunc.specsig && jl_subtype(astrt, declrt)) {
+ assign_fptr(IRLinker_copyFunctionProto(M, defM->getFunction(specfunc)));
+ continue;
+ }
+ assign_fptr(aot_abi_converter(params, M, declrt, sigt, cfunc.nargs, cfunc.specsig, codeinst, defM, func, specfunc, false));
+ continue;
+ }
+ else if (func == "jl_fptr_sparam" || func == "jl_f_opaque_closure_call") {
+ func = ""; // use jl_invoke instead for these, since we don't declare these prototypes
+ }
+ else {
+ assert(!specfunc.empty());
+ if (jl_egal(mi->specTypes, sigt) && jl_egal(declrt, astrt)) {
+ assign_fptr(IRLinker_copyFunctionProto(M, defM->getFunction(specfunc)));
+ continue;
+ }
+ assign_fptr(aot_abi_converter(params, M, declrt, sigt, cfunc.nargs, cfunc.specsig, codeinst, defM, func, specfunc, true));
+ continue;
+ }
+ }
+ }
+ Function *f = codeinst ? aot_abi_converter(params, M, declrt, sigt, cfunc.nargs, cfunc.specsig, codeinst, defM, func, "", false) : unspec;
+ return assign_fptr(f);
+ }
+}
+
// takes the running content that has collected in the shadow module and dump it to disk
// this builds the object file portion of the sysimage files for fast startup
-// `_external_linkage` create linkages between pkgimages.
+// `external_linkage` create linkages between pkgimages.
extern "C" JL_DLLEXPORT_CODEGEN
-void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvmmod, int _trim, int _external_linkage, size_t world)
+void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvmmod, int trim, int external_linkage, size_t world)
{
JL_TIMING(INFERENCE, INFERENCE);
auto ct = jl_current_task;
@@ -517,10 +642,9 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm
compiler_start_time = jl_hrtime();
jl_cgparams_t cgparams = jl_default_cgparams;
- cgparams.trim = _trim ? 1 : 0;
size_t compile_for[] = { jl_typeinf_world, world };
int compiler_world = 1;
- if (_trim || compile_for[0] == 0)
+ if (trim || compile_for[0] == 0)
compiler_world = 0;
jl_value_t **fargs;
JL_GC_PUSHARGS(fargs, 4);
@@ -537,7 +661,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm
fargs[2] = (jl_value_t*)worlds;
jl_array_data(worlds, size_t)[0] = jl_typeinf_world;
jl_array_data(worlds, size_t)[compiler_world] = world; // might overwrite previous
- fargs[3] = _trim ? jl_true : jl_false;
+ fargs[3] = jl_box_long(trim);
size_t last_age = ct->world_age;
ct->world_age = jl_typeinf_world;
codeinfos = (jl_array_t*)jl_apply(fargs, 4);
@@ -549,7 +673,21 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm
jl_error("inference not available for generating compiled output");
}
fargs[0] = (jl_value_t*)codeinfos;
- void *data = jl_emit_native(codeinfos, llvmmod, &cgparams, _external_linkage);
+ void *data = jl_emit_native(codeinfos, llvmmod, &cgparams, external_linkage);
+
+ // examine everything just emitted and save it to the caches
+ if (!external_linkage) {
+ for (size_t i = 0, l = jl_array_nrows(codeinfos); i < l; i++) {
+ jl_value_t *item = jl_array_ptr_ref(codeinfos, i);
+ if (jl_is_code_instance(item)) {
+ // now add it to our compilation results
+ jl_code_instance_t *codeinst = (jl_code_instance_t*)item;
+ jl_code_info_t *src = (jl_code_info_t*)jl_array_ptr_ref(codeinfos, ++i);
+ assert(jl_is_code_info(src));
+ jl_add_codeinst_to_cache(codeinst, src);
+ }
+ }
+ }
// move everything inside, now that we've merged everything
// (before adding the exported headers)
@@ -593,7 +731,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm
// also be used be extern consumers like GPUCompiler.jl to obtain a module containing
// all reachable & inferrrable functions.
extern "C" JL_DLLEXPORT_CODEGEN
-void *jl_emit_native_impl(jl_array_t *codeinfos, LLVMOrcThreadSafeModuleRef llvmmod, const jl_cgparams_t *cgparams, int _external_linkage)
+void *jl_emit_native_impl(jl_array_t *codeinfos, LLVMOrcThreadSafeModuleRef llvmmod, const jl_cgparams_t *cgparams, int external_linkage)
{
JL_TIMING(NATIVE_AOT, NATIVE_Create);
++CreateNativeCalls;
@@ -601,7 +739,6 @@ void *jl_emit_native_impl(jl_array_t *codeinfos, LLVMOrcThreadSafeModuleRef llvm
if (cgparams == NULL)
cgparams = &jl_default_cgparams;
jl_native_code_desc_t *data = new jl_native_code_desc_t;
- jl_method_instance_t *mi = NULL;
orc::ThreadSafeContext ctx;
orc::ThreadSafeModule backing;
if (!llvmmod) {
@@ -621,7 +758,7 @@ void *jl_emit_native_impl(jl_array_t *codeinfos, LLVMOrcThreadSafeModuleRef llvm
params.getContext().setDiscardValueNames(true);
params.params = cgparams;
assert(params.imaging_mode); // `_imaging_mode` controls if broken features like code-coverage are disabled
- params.external_linkage = _external_linkage;
+ params.external_linkage = external_linkage;
params.temporary_roots = jl_alloc_array_1d(jl_array_any_type, 0);
JL_GC_PUSH3(&params.temporary_roots, &method_roots.list, &method_roots.keyset);
jl_compiled_functions_t compiled_functions;
@@ -637,7 +774,7 @@ void *jl_emit_native_impl(jl_array_t *codeinfos, LLVMOrcThreadSafeModuleRef llvm
assert(jl_is_code_info(src));
if (compiled_functions.count(codeinst))
continue; // skip any duplicates that accidentally made there way in here (or make this an error?)
- if (_external_linkage) {
+ if (external_linkage) {
uint8_t specsigflags;
jl_callptr_t invoke;
void *fptr;
@@ -651,26 +788,28 @@ void *jl_emit_native_impl(jl_array_t *codeinfos, LLVMOrcThreadSafeModuleRef llvm
orc::ThreadSafeModule result_m = jl_create_ts_module(name_from_method_instance(jl_get_ci_mi(codeinst)),
params.tsctx, clone.getModuleUnlocked()->getDataLayout(),
Triple(clone.getModuleUnlocked()->getTargetTriple()));
- jl_llvm_functions_t decls = jl_emit_codeinst(result_m, codeinst, src, params);
+ jl_llvm_functions_t decls;
+ if (!(params.params->force_emit_all) && jl_atomic_load_relaxed(&codeinst->invoke) == jl_fptr_const_return_addr)
+ decls.functionObject = "jl_fptr_const_return";
+ else
+ decls = jl_emit_codeinst(result_m, codeinst, src, params);
record_method_roots(method_roots, jl_get_ci_mi(codeinst));
if (result_m)
compiled_functions[codeinst] = {std::move(result_m), std::move(decls)};
- else if (params.params->trim) {
- // if we're building a small image, we need to compile everything
- // to ensure that we have all the information we need.
- jl_safe_printf("codegen failed to compile code root ");
- jl_(mi);
- abort();
- }
}
else {
- jl_value_t *sig = jl_array_ptr_ref(codeinfos, ++i);
- assert(jl_is_type(item) && jl_is_type(sig));
- jl_compile_extern_c(wrap(&clone), &params, NULL, item, sig);
+ assert(jl_is_simplevector(item));
+ jl_value_t *rt = jl_svecref(item, 0);
+ jl_value_t *sig = jl_svecref(item, 1);
+ jl_value_t *nameval = jl_svec_len(item) == 2 ? jl_nothing : jl_svecref(item, 2);
+ assert(jl_is_type(rt) && jl_is_type(sig));
+ jl_generate_ccallable(clone.getModuleUnlocked(), nameval, rt, sig, params);
}
}
// finally, make sure all referenced methods get fixed up, particularly if the user declined to compile them
resolve_workqueue(params, method_roots, compiled_functions);
+ // including generating cfunction thunks
+ generate_cfunc_thunks(params, compiled_functions);
aot_optimize_roots(params, method_roots, compiled_functions);
params.temporary_roots = nullptr;
JL_GC_POP();
@@ -728,9 +867,12 @@ void *jl_emit_native_impl(jl_array_t *codeinfos, LLVMOrcThreadSafeModuleRef llvm
else if (func == "jl_fptr_sparam") {
func_id = -2;
}
- else if (decls.functionObject == "jl_f_opaque_closure_call") {
+ else if (func == "jl_f_opaque_closure_call") {
func_id = -4;
}
+ else if (func == "jl_fptr_const_return") {
+ func_id = -5;
+ }
else {
//Safe b/c context is locked by params
data->jl_sysimg_fvars.push_back(cast<Function>(clone.getModuleUnlocked()->getNamedValue(func)));
@@ -829,10 +971,18 @@ static GlobalVariable *emit_shard_table(Module &M, Type *T_size, Type *T_psize,
return tables_gv;
}
+static Function *emit_pgcstack_default_func(Module &M, Type *T_ptr) {
+ auto FT = FunctionType::get(T_ptr, false);
+ auto F = Function::Create(FT, GlobalValue::InternalLinkage, "pgcstack_default_func", &M);
+ llvm::IRBuilder<> builder(BasicBlock::Create(M.getContext(), "top", F));
+ builder.CreateRet(Constant::getNullValue(T_ptr));
+ return F;
+}
+
// See src/processor.h for documentation about this table. Corresponds to jl_image_ptls_t.
-static GlobalVariable *emit_ptls_table(Module &M, Type *T_size, Type *T_psize) {
+static GlobalVariable *emit_ptls_table(Module &M, Type *T_size, Type *T_ptr) {
std::array<Constant *, 3> ptls_table{
- new GlobalVariable(M, T_size, false, GlobalValue::ExternalLinkage, Constant::getNullValue(T_size), "jl_pgcstack_func_slot"),
+ new GlobalVariable(M, T_ptr, false, GlobalValue::ExternalLinkage, emit_pgcstack_default_func(M, T_ptr), "jl_pgcstack_func_slot"),
new GlobalVariable(M, T_size, false, GlobalValue::ExternalLinkage, Constant::getNullValue(T_size), "jl_pgcstack_key_slot"),
new GlobalVariable(M, T_size, false, GlobalValue::ExternalLinkage, Constant::getNullValue(T_size), "jl_tls_offset"),
};
@@ -840,7 +990,7 @@ static GlobalVariable *emit_ptls_table(Module &M, Type *T_size, Type *T_psize) {
cast<GlobalVariable>(gv)->setVisibility(GlobalValue::HiddenVisibility);
cast<GlobalVariable>(gv)->setDSOLocal(true);
}
- auto ptls_table_arr = ConstantArray::get(ArrayType::get(T_psize, ptls_table.size()), ptls_table);
+ auto ptls_table_arr = ConstantArray::get(ArrayType::get(T_ptr, ptls_table.size()), ptls_table);
auto ptls_table_gv = new GlobalVariable(M, ptls_table_arr->getType(), false,
GlobalValue::ExternalLinkage, ptls_table_arr, "jl_ptls_table");
ptls_table_gv->setVisibility(GlobalValue::HiddenVisibility);
@@ -2039,6 +2189,7 @@ void jl_dump_native_impl(void *native_code,
Type *T_size = DL.getIntPtrType(Context);
Type *T_psize = T_size->getPointerTo();
+ Type *T_ptr = PointerType::get(Context, 0);
auto FT = FunctionType::get(Type::getInt8Ty(Context)->getPointerTo()->getPointerTo(), {}, false);
auto F = Function::Create(FT, Function::ExternalLinkage, "get_jl_RTLD_DEFAULT_handle_addr", metadataM);
@@ -2062,7 +2213,7 @@ void jl_dump_native_impl(void *native_code,
builder.CreateRet(ConstantInt::get(T_int32, 1));
}
if (imaging_mode) {
- auto specs = jl_get_llvm_clone_targets();
+ auto specs = jl_get_llvm_clone_targets(jl_options.cpu_target);
const uint32_t base_flags = has_veccall ? JL_TARGET_VEC_CALL : 0;
SmallVector<uint8_t, 0> data;
auto push_i32 = [&] (uint32_t v) {
@@ -2081,7 +2232,7 @@ void jl_dump_native_impl(void *native_code,
GlobalVariable::InternalLinkage,
value, "jl_dispatch_target_ids");
auto shards = emit_shard_table(metadataM, T_size, T_psize, threads);
- auto ptls = emit_ptls_table(metadataM, T_size, T_psize);
+ auto ptls = emit_ptls_table(metadataM, T_size, T_ptr);
auto header = emit_image_header(metadataM, threads, nfvars, ngvars);
auto AT = ArrayType::get(T_size, sizeof(jl_small_typeof) / sizeof(void*));
auto jl_small_typeof_copy = new GlobalVariable(metadataM, AT, false,
@@ -2201,7 +2352,7 @@ extern "C" JL_DLLEXPORT_CODEGEN jl_code_info_t *jl_gdbdumpcode(jl_method_instanc
// for use in reflection from Julia.
// This is paired with jl_dump_function_ir and jl_dump_function_asm, either of which will free all memory allocated here
extern "C" JL_DLLEXPORT_CODEGEN
-void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, jl_code_info_t *src, char getwrapper, char optimize, const jl_cgparams_t params)
+void jl_get_llvmf_defn_impl(jl_llvmf_dump_t *dump, jl_method_instance_t *mi, jl_code_info_t *src, char getwrapper, char optimize, const jl_cgparams_t params)
{
// emit this function into a new llvm module
dump->F = nullptr;
@@ -2223,7 +2374,31 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, jl_
output.imaging_mode = jl_options.image_codegen;
output.temporary_roots = jl_alloc_array_1d(jl_array_any_type, 0);
JL_GC_PUSH1(&output.temporary_roots);
- auto decls = jl_emit_code(m, mi, src, mi->specTypes, src->rettype, output);
+ jl_llvm_functions_t decls = jl_emit_code(m, mi, src, mi->specTypes, src->rettype, output);
+ // while not required, also emit the cfunc thunks, based on the
+ // inferred ABIs of their targets in the current latest world,
+ // since otherwise it is challenging to see all relevant codes
+ jl_compiled_functions_t compiled_functions;
+ size_t latestworld = jl_atomic_load_acquire(&jl_world_counter);
+ for (cfunc_decl_t &cfunc : output.cfuncs) {
+ jl_value_t *sigt = cfunc.sigt;
+ JL_GC_PROMISE_ROOTED(sigt);
+ jl_method_instance_t *mi = jl_get_specialization1((jl_tupletype_t*)sigt, latestworld, 0);
+ if (mi == nullptr)
+ continue;
+ jl_code_instance_t *codeinst = jl_type_infer(mi, latestworld, SOURCE_MODE_NOT_REQUIRED);
+ if (codeinst == nullptr || compiled_functions.count(codeinst))
+ continue;
+ orc::ThreadSafeModule decl_m = jl_create_ts_module("extern", ctx);
+ jl_llvm_functions_t decls;
+ if (jl_atomic_load_relaxed(&codeinst->invoke) == jl_fptr_const_return_addr)
+ decls.functionObject = "jl_fptr_const_return";
+ else
+ decls = jl_emit_codedecls(decl_m, codeinst, output);
+ compiled_functions[codeinst] = {std::move(decl_m), std::move(decls)};
+ }
+ generate_cfunc_thunks(output, compiled_functions);
+ compiled_functions.clear();
output.temporary_roots = nullptr;
JL_GC_POP(); // GC the global_targets array contents now since reflection doesn't need it
diff --git a/src/ast.c b/src/ast.c
index 0f24d96393f2f..ab6b04efa526a 100644
--- a/src/ast.c
+++ b/src/ast.c
@@ -178,7 +178,7 @@ static value_t fl_defined_julia_global(fl_context_t *fl_ctx, value_t *args, uint
jl_sym_t *var = scmsym_to_julia(fl_ctx, args[0]);
jl_binding_t *b = jl_get_module_binding(ctx->module, var, 0);
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- return (bpart != NULL && decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)) == BINDING_KIND_GLOBAL) ? fl_ctx->T : fl_ctx->F;
+ return (bpart != NULL && jl_binding_kind(bpart) == PARTITION_KIND_GLOBAL) ? fl_ctx->T : fl_ctx->F;
}
// Used to generate a unique suffix for a given symbol (e.g. variable or type name)
@@ -1369,6 +1369,34 @@ JL_DLLEXPORT jl_value_t *jl_expand_stmt(jl_value_t *expr, jl_module_t *inmodule)
return jl_expand_stmt_with_loc(expr, inmodule, "none", 0);
}
+jl_code_info_t *jl_outer_ctor_body(jl_value_t *thistype, size_t nfields, size_t nsparams, jl_module_t *inmodule, const char *file, int line)
+{
+ JL_TIMING(LOWERING, LOWERING);
+ jl_timing_show_location(file, line, inmodule, JL_TIMING_DEFAULT_BLOCK);
+ jl_expr_t *expr = jl_exprn(jl_empty_sym, 3);
+ JL_GC_PUSH1(&expr);
+ jl_exprargset(expr, 0, thistype);
+ jl_exprargset(expr, 1, jl_box_long(nfields));
+ jl_exprargset(expr, 2, jl_box_long(nsparams));
+ jl_code_info_t *ci = (jl_code_info_t*)jl_call_scm_on_ast_and_loc("jl-default-outer-ctor-body", (jl_value_t*)expr, inmodule, file, line);
+ JL_GC_POP();
+ assert(jl_is_code_info(ci));
+ return ci;
+}
+
+jl_code_info_t *jl_inner_ctor_body(jl_array_t *fieldkinds, jl_module_t *inmodule, const char *file, int line)
+{
+ JL_TIMING(LOWERING, LOWERING);
+ jl_timing_show_location(file, line, inmodule, JL_TIMING_DEFAULT_BLOCK);
+ jl_expr_t *expr = jl_exprn(jl_empty_sym, 0);
+ JL_GC_PUSH1(&expr);
+ expr->args = fieldkinds;
+ jl_code_info_t *ci = (jl_code_info_t*)jl_call_scm_on_ast_and_loc("jl-default-inner-ctor-body", (jl_value_t*)expr, inmodule, file, line);
+ JL_GC_POP();
+ assert(jl_is_code_info(ci));
+ return ci;
+}
+
//------------------------------------------------------------------------------
// Parsing API and utils for calling parser from runtime
diff --git a/src/builtin_proto.h b/src/builtin_proto.h
index 77463ae4884cb..c82ec77414129 100644
--- a/src/builtin_proto.h
+++ b/src/builtin_proto.h
@@ -22,10 +22,9 @@ extern "C" {
#endif
DECLARE_BUILTIN(_apply_iterate);
-DECLARE_BUILTIN(_apply_pure);
-DECLARE_BUILTIN(_call_in_world);
+DECLARE_BUILTIN(invoke_in_world);
DECLARE_BUILTIN(_call_in_world_total);
-DECLARE_BUILTIN(_call_latest);
+DECLARE_BUILTIN(invokelatest);
DECLARE_BUILTIN(_compute_sparams);
DECLARE_BUILTIN(_expr);
DECLARE_BUILTIN(_svec_ref);
@@ -80,6 +79,7 @@ JL_CALLABLE(jl_f__structtype);
JL_CALLABLE(jl_f__abstracttype);
JL_CALLABLE(jl_f__primitivetype);
JL_CALLABLE(jl_f__setsuper);
+JL_CALLABLE(jl_f__defaultctors);
JL_CALLABLE(jl_f__equiv_typedef);
JL_CALLABLE(jl_f_get_binding_type);
JL_CALLABLE(jl_f__compute_sparams);
diff --git a/src/builtins.c b/src/builtins.c
index f67ef65d35356..a2cae857f26b4 100644
--- a/src/builtins.c
+++ b/src/builtins.c
@@ -637,9 +637,14 @@ static jl_value_t *jl_arrayref(jl_array_t *a, size_t i)
return jl_memoryrefget(jl_memoryrefindex(a->ref, i), 0);
}
-static jl_value_t *do_apply(jl_value_t **args, uint32_t nargs, jl_value_t *iterate)
+JL_CALLABLE(jl_f__apply_iterate)
{
- jl_function_t *f = args[0];
+ JL_NARGSV(_apply_iterate, 2);
+ jl_function_t *iterate = args[0];
+ jl_function_t *f = args[1];
+ assert(iterate);
+ args += 1;
+ nargs -= 1;
if (nargs == 2) {
// some common simple cases
if (f == jl_builtin_svec) {
@@ -692,9 +697,6 @@ static jl_value_t *do_apply(jl_value_t **args, uint32_t nargs, jl_value_t *itera
extra += 1;
}
}
- if (extra && iterate == NULL) {
- jl_undefined_var_error(jl_symbol("iterate"), NULL);
- }
// allocate space for the argument array and gc roots for it
// based on our previous estimates
// use the stack if we have a good estimate that it is small
@@ -841,41 +843,10 @@ static jl_value_t *do_apply(jl_value_t **args, uint32_t nargs, jl_value_t *itera
return result;
}
-JL_CALLABLE(jl_f__apply_iterate)
-{
- JL_NARGSV(_apply_iterate, 2);
- return do_apply(args + 1, nargs - 1, args[0]);
-}
-
-// this is like `_apply`, but with quasi-exact checks to make sure it is pure
-JL_CALLABLE(jl_f__apply_pure)
-{
- jl_task_t *ct = jl_current_task;
- int last_in = ct->ptls->in_pure_callback;
- jl_value_t *ret = NULL;
- JL_TRY {
- ct->ptls->in_pure_callback = 1;
- // because this function was declared pure,
- // we should be allowed to run it in any world
- // so we run it in the newest world;
- // because, why not :)
- // and `promote` works better this way
- size_t last_age = ct->world_age;
- ct->world_age = jl_atomic_load_acquire(&jl_world_counter);
- ret = do_apply(args, nargs, NULL);
- ct->world_age = last_age;
- ct->ptls->in_pure_callback = last_in;
- }
- JL_CATCH {
- ct->ptls->in_pure_callback = last_in;
- jl_rethrow();
- }
- return ret;
-}
-
// this is like a regular call, but always runs in the newest world
-JL_CALLABLE(jl_f__call_latest)
+JL_CALLABLE(jl_f_invokelatest)
{
+ JL_NARGSV(invokelatest, 1);
jl_task_t *ct = jl_current_task;
size_t last_age = ct->world_age;
if (!ct->ptls->in_pure_callback)
@@ -885,14 +856,14 @@ JL_CALLABLE(jl_f__call_latest)
return ret;
}
-// Like call_in_world, but runs in the specified world.
+// Like invokelatest, but runs in the specified world.
// If world > jl_atomic_load_acquire(&jl_world_counter), run in the latest world.
-JL_CALLABLE(jl_f__call_in_world)
+JL_CALLABLE(jl_f_invoke_in_world)
{
- JL_NARGSV(_apply_in_world, 2);
+ JL_NARGSV(invoke_in_world, 2);
jl_task_t *ct = jl_current_task;
size_t last_age = ct->world_age;
- JL_TYPECHK(_apply_in_world, ulong, args[0]);
+ JL_TYPECHK(invoke_in_world, ulong, args[0]);
size_t world = jl_unbox_ulong(args[0]);
if (!ct->ptls->in_pure_callback) {
ct->world_age = jl_atomic_load_acquire(&jl_world_counter);
@@ -907,7 +878,7 @@ JL_CALLABLE(jl_f__call_in_world)
JL_CALLABLE(jl_f__call_in_world_total)
{
JL_NARGSV(_call_in_world_total, 2);
- JL_TYPECHK(_apply_in_world, ulong, args[0]);
+ JL_TYPECHK(_call_in_world_total, ulong, args[0]);
jl_task_t *ct = jl_current_task;
int last_in = ct->ptls->in_pure_callback;
jl_value_t *ret = NULL;
@@ -2197,11 +2168,13 @@ static int references_name(jl_value_t *p, jl_typename_t *name, int affects_layou
JL_CALLABLE(jl_f__typebody)
{
- JL_NARGS(_typebody!, 1, 2);
- jl_datatype_t *dt = (jl_datatype_t*)jl_unwrap_unionall(args[0]);
+ JL_NARGS(_typebody!, 2, 3);
+ jl_value_t *prev = args[0];
+ jl_value_t *tret = args[1];
+ jl_datatype_t *dt = (jl_datatype_t*)jl_unwrap_unionall(args[1]);
JL_TYPECHK(_typebody!, datatype, (jl_value_t*)dt);
- if (nargs == 2) {
- jl_value_t *ft = args[1];
+ if (nargs == 3) {
+ jl_value_t *ft = args[2];
JL_TYPECHK(_typebody!, simplevector, ft);
size_t nf = jl_svec_len(ft);
for (size_t i = 0; i < nf; i++) {
@@ -2212,30 +2185,53 @@ JL_CALLABLE(jl_f__typebody)
(jl_value_t*)jl_type_type, elt);
}
}
- if (dt->types != NULL) {
- if (!equiv_field_types((jl_value_t*)dt->types, ft))
- jl_errorf("invalid redefinition of type %s", jl_symbol_name(dt->name->name));
- }
- else {
- dt->types = (jl_svec_t*)ft;
- jl_gc_wb(dt, ft);
- // If a supertype can reference the same type, then we may not be
- // able to compute the layout of the object before needing to
- // publish it, so we must assume it cannot be inlined, if that
- // check passes, then we also still need to check the fields too.
- if (!dt->name->mutabl && (nf == 0 || !references_name((jl_value_t*)dt->super, dt->name, 0, 1))) {
- int mayinlinealloc = 1;
- size_t i;
- for (i = 0; i < nf; i++) {
- jl_value_t *fld = jl_svecref(ft, i);
- if (references_name(fld, dt->name, 1, 1)) {
- mayinlinealloc = 0;
- break;
+ // Optimization: To avoid lots of unnecessary churning, lowering contains an optimization
+ // that re-uses the typevars of an existing definition (if any exists) for compute the field
+ // types. If such a previous type exists, there are two possibilities:
+ // 1. The field types are identical, we don't need to do anything and can proceed with the
+ // old type as if it was the new one.
+ // 2. The field types are not identical, in which case we need to rename the typevars
+ // back to their equivalents in the new type before proceeding.
+ if (prev == jl_false) {
+ if (dt->types != NULL)
+ jl_errorf("Internal Error: Expected type fields to be unset");
+ } else {
+ jl_datatype_t *prev_dt = (jl_datatype_t*)jl_unwrap_unionall(prev);
+ JL_TYPECHK(_typebody!, datatype, (jl_value_t*)prev_dt);
+ if (equiv_field_types((jl_value_t*)prev_dt->types, ft)) {
+ tret = prev;
+ goto have_type;
+ } else {
+ if (jl_svec_len(prev_dt->parameters) != jl_svec_len(dt->parameters))
+ jl_errorf("Internal Error: Types should not have been considered equivalent");
+ for (size_t i = 0; i < nf; i++) {
+ jl_value_t *elt = jl_svecref(ft, i);
+ for (int j = 0; j < jl_svec_len(prev_dt->parameters); ++j) {
+ // Only the last svecset matters for semantics, but we re-use the GC root
+ elt = jl_substitute_var(elt, (jl_tvar_t *)jl_svecref(prev_dt->parameters, j), jl_svecref(dt->parameters, j));
+ jl_svecset(ft, i, elt);
}
}
- dt->name->mayinlinealloc = mayinlinealloc;
}
}
+ dt->types = (jl_svec_t*)ft;
+ jl_gc_wb(dt, ft);
+ // If a supertype can reference the same type, then we may not be
+ // able to compute the layout of the object before needing to
+ // publish it, so we must assume it cannot be inlined, if that
+ // check passes, then we also still need to check the fields too.
+ if (!dt->name->mutabl && (nf == 0 || !references_name((jl_value_t*)dt->super, dt->name, 0, 1))) {
+ int mayinlinealloc = 1;
+ size_t i;
+ for (i = 0; i < nf; i++) {
+ jl_value_t *fld = jl_svecref(ft, i);
+ if (references_name(fld, dt->name, 1, 1)) {
+ mayinlinealloc = 0;
+ break;
+ }
+ }
+ dt->name->mayinlinealloc = mayinlinealloc;
+ }
}
JL_TRY {
@@ -2248,7 +2244,8 @@ JL_CALLABLE(jl_f__typebody)
if (jl_is_structtype(dt))
jl_compute_field_offsets(dt);
- return jl_nothing;
+have_type:
+ return tret;
}
// this is a heuristic for allowing "redefining" a type to something identical
@@ -2319,6 +2316,13 @@ JL_CALLABLE(jl_f__equiv_typedef)
return equiv_type(args[0], args[1]) ? jl_true : jl_false;
}
+JL_CALLABLE(jl_f__defaultctors)
+{
+ JL_NARGS(_defaultctors, 2, 2);
+ jl_ctor_def(args[0], args[1]);
+ return jl_nothing;
+}
+
// IntrinsicFunctions ---------------------------------------------------------
static void (*runtime_fp[num_intrinsics])(void);
@@ -2393,8 +2397,7 @@ static void add_intrinsic(jl_module_t *inm, const char *name, enum intrinsic f)
{
jl_value_t *i = jl_permbox32(jl_intrinsic_type, 0, (int32_t)f);
jl_sym_t *sym = jl_symbol(name);
- jl_set_const(inm, sym, i);
- jl_module_public(inm, sym, 1);
+ jl_set_initial_const(inm, sym, i, 1);
}
void jl_init_intrinsic_properties(void) JL_GC_DISABLED
@@ -2410,9 +2413,8 @@ void jl_init_intrinsic_properties(void) JL_GC_DISABLED
void jl_init_intrinsic_functions(void) JL_GC_DISABLED
{
- jl_module_t *inm = jl_new_module(jl_symbol("Intrinsics"), NULL);
- inm->parent = jl_core_module;
- jl_set_const(jl_core_module, jl_symbol("Intrinsics"), (jl_value_t*)inm);
+ jl_module_t *inm = jl_new_module_(jl_symbol("Intrinsics"), jl_core_module, 0, 1);
+ jl_set_initial_const(jl_core_module, jl_symbol("Intrinsics"), (jl_value_t*)inm, 0);
jl_mk_builtin_func(jl_intrinsic_type, "IntrinsicFunction", jl_f_intrinsic_call);
jl_mk_builtin_func(
(jl_datatype_t*)jl_unwrap_unionall((jl_value_t*)jl_opaque_closure_type),
@@ -2434,7 +2436,7 @@ void jl_init_intrinsic_functions(void) JL_GC_DISABLED
static void add_builtin(const char *name, jl_value_t *v)
{
- jl_set_const(jl_core_module, jl_symbol(name), v);
+ jl_set_initial_const(jl_core_module, jl_symbol(name), v, 0);
}
jl_fptr_args_t jl_get_builtin_fptr(jl_datatype_t *dt)
@@ -2506,15 +2508,15 @@ void jl_init_primitives(void) JL_GC_DISABLED
jl_builtin__apply_iterate = add_builtin_func("_apply_iterate", jl_f__apply_iterate);
jl_builtin__expr = add_builtin_func("_expr", jl_f__expr);
jl_builtin_svec = add_builtin_func("svec", jl_f_svec);
- add_builtin_func("_apply_pure", jl_f__apply_pure);
- add_builtin_func("_call_latest", jl_f__call_latest);
- add_builtin_func("_call_in_world", jl_f__call_in_world);
+ add_builtin_func("invokelatest", jl_f_invokelatest);
+ add_builtin_func("invoke_in_world", jl_f_invoke_in_world);
add_builtin_func("_call_in_world_total", jl_f__call_in_world_total);
add_builtin_func("_typevar", jl_f__typevar);
add_builtin_func("_structtype", jl_f__structtype);
add_builtin_func("_abstracttype", jl_f__abstracttype);
add_builtin_func("_primitivetype", jl_f__primitivetype);
add_builtin_func("_setsuper!", jl_f__setsuper);
+ add_builtin_func("_defaultctors", jl_f__defaultctors);
jl_builtin__typebody = add_builtin_func("_typebody!", jl_f__typebody);
add_builtin_func("_equiv_typedef", jl_f__equiv_typedef);
jl_builtin_donotdelete = add_builtin_func("donotdelete", jl_f_donotdelete);
diff --git a/src/ccall.cpp b/src/ccall.cpp
index eb64adef447f4..865278d525384 100644
--- a/src/ccall.cpp
+++ b/src/ccall.cpp
@@ -547,7 +547,7 @@ static Value *julia_to_native(
Align align(julia_alignment(jlto));
Value *slot = emit_static_alloca(ctx, to, align);
setName(ctx.emission_context, slot, "native_convert_buffer");
- emit_unbox_store(ctx, jvinfo, slot, ctx.tbaa().tbaa_stack, align);
+ emit_unbox_store(ctx, jvinfo, slot, ctx.tbaa().tbaa_stack, align, align);
return slot;
}
@@ -1134,6 +1134,7 @@ class function_sig_t {
AttributeList attributes; // vector of function call site attributes
Type *lrt; // input parameter of the llvm return type (from julia_struct_to_llvm)
bool retboxed; // input parameter indicating whether lrt is jl_value_t*
+ bool gc_safe; // input parameter indicating whether the call is safe to execute concurrently to GC
Type *prt; // out parameter of the llvm return type for the function signature
int sret; // out parameter for indicating whether return value has been moved to the first argument position
std::string err_msg;
@@ -1146,8 +1147,8 @@ class function_sig_t {
size_t nreqargs; // number of required arguments in ccall function definition
jl_codegen_params_t *ctx;
- function_sig_t(const char *fname, Type *lrt, jl_value_t *rt, bool retboxed, jl_svec_t *at, jl_unionall_t *unionall_env, size_t nreqargs, CallingConv::ID cc, bool llvmcall, jl_codegen_params_t *ctx)
- : lrt(lrt), retboxed(retboxed),
+ function_sig_t(const char *fname, Type *lrt, jl_value_t *rt, bool retboxed, bool gc_safe, jl_svec_t *at, jl_unionall_t *unionall_env, size_t nreqargs, CallingConv::ID cc, bool llvmcall, jl_codegen_params_t *ctx)
+ : lrt(lrt), retboxed(retboxed), gc_safe(gc_safe),
prt(NULL), sret(0), cc(cc), llvmcall(llvmcall),
at(at), rt(rt), unionall_env(unionall_env),
nccallargs(jl_svec_len(at)), nreqargs(nreqargs),
@@ -1295,6 +1296,7 @@ std::string generate_func_sig(const char *fname)
RetAttrs = RetAttrs.addAttribute(LLVMCtx, Attribute::NonNull);
if (rt == jl_bottom_type)
FnAttrs = FnAttrs.addAttribute(LLVMCtx, Attribute::NoReturn);
+
assert(attributes.isEmpty());
attributes = AttributeList::get(LLVMCtx, FnAttrs, RetAttrs, paramattrs);
return "";
@@ -1412,7 +1414,7 @@ static const std::string verify_ccall_sig(jl_value_t *&rt, jl_value_t *at,
const int fc_args_start = 6;
-// Expr(:foreigncall, pointer, rettype, (argtypes...), nreq, [cconv | (cconv, effects)], args..., roots...)
+// Expr(:foreigncall, pointer, rettype, (argtypes...), nreq, gc_safe, [cconv | (cconv, effects)], args..., roots...)
static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs)
{
JL_NARGSV(ccall, 5);
@@ -1424,11 +1426,13 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs)
assert(jl_is_quotenode(args[5]));
jl_value_t *jlcc = jl_quotenode_value(args[5]);
jl_sym_t *cc_sym = NULL;
+ bool gc_safe = false;
if (jl_is_symbol(jlcc)) {
cc_sym = (jl_sym_t*)jlcc;
}
else if (jl_is_tuple(jlcc)) {
cc_sym = (jl_sym_t*)jl_get_nth_field_noalloc(jlcc, 0);
+ gc_safe = jl_unbox_bool(jl_get_nth_field_checked(jlcc, 2));
}
assert(jl_is_symbol(cc_sym));
native_sym_arg_t symarg = {};
@@ -1547,7 +1551,7 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs)
}
if (rt != args[2] && rt != (jl_value_t*)jl_any_type)
jl_temporary_root(ctx, rt);
- function_sig_t sig("ccall", lrt, rt, retboxed,
+ function_sig_t sig("ccall", lrt, rt, retboxed, gc_safe,
(jl_svec_t*)at, unionall, nreqargs,
cc, llvmcall, &ctx.emission_context);
for (size_t i = 0; i < nccallargs; i++) {
@@ -1970,6 +1974,8 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs)
return retval;
}
+static inline Constant *literal_static_pointer_val(const void *p, Type *T);
+
jl_cgval_t function_sig_t::emit_a_ccall(
jl_codectx_t &ctx,
const native_sym_arg_t &symarg,
@@ -2156,11 +2162,16 @@ jl_cgval_t function_sig_t::emit_a_ccall(
}
}
- OperandBundleDef OpBundle("jl_roots", gc_uses);
+ // Potentially we could drop `jl_roots(gc_uses)` in the presence of `gc-transition(gc_uses)`
+ SmallVector<OperandBundleDef, 2> bundles;
+ if (!gc_uses.empty())
+ bundles.push_back(OperandBundleDef("jl_roots", gc_uses));
+ if (gc_safe)
+ bundles.push_back(OperandBundleDef("gc-transition", ArrayRef<Value*>{}));
// the actual call
CallInst *ret = ctx.builder.CreateCall(functype, llvmf,
argvals,
- ArrayRef(&OpBundle, gc_uses.empty() ? 0 : 1));
+ bundles);
((CallInst*)ret)->setAttributes(attributes);
if (cc != CallingConv::C)
diff --git a/src/cgutils.cpp b/src/cgutils.cpp
index 98c5627578b80..f3425d058841a 100644
--- a/src/cgutils.cpp
+++ b/src/cgutils.cpp
@@ -301,7 +301,7 @@ static Value *emit_pointer_from_objref(jl_codectx_t &ctx, Value *V)
}
static Value *emit_unbox(jl_codectx_t &ctx, Type *to, const jl_cgval_t &x, jl_value_t *jt);
-static void emit_unbox_store(jl_codectx_t &ctx, const jl_cgval_t &x, Value* dest, MDNode *tbaa_dest, Align alignment, bool isVolatile=false);
+static void emit_unbox_store(jl_codectx_t &ctx, const jl_cgval_t &x, Value* dest, MDNode *tbaa_dest, MaybeAlign align_src, Align align_dst, bool isVolatile=false);
static bool type_is_permalloc(jl_value_t *typ)
{
@@ -391,19 +391,17 @@ static llvm::SmallVector get_gc_roots_for(jl_codectx_t &ctx, const jl_
static void jl_temporary_root(jl_codegen_params_t &ctx, jl_value_t *val);
static void jl_temporary_root(jl_codectx_t &ctx, jl_value_t *val);
-static inline Constant *literal_static_pointer_val(const void *p, Type *T);
-static Constant *julia_pgv(jl_codectx_t &ctx, const char *cname, void *addr)
+static Constant *julia_pgv(jl_codegen_params_t &params, Module *M, const char *cname, void *addr)
{
// emit a GlobalVariable for a jl_value_t named "cname"
// store the name given so we can reuse it (facilitating merging later)
// so first see if there already is a GlobalVariable for this address
- GlobalVariable* &gv = ctx.emission_context.global_targets[addr];
- Module *M = jl_Module;
+ GlobalVariable* &gv = params.global_targets[addr];
StringRef localname;
std::string gvname;
if (!gv) {
- uint64_t id = jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1); // TODO: use ctx.emission_context.global_targets.size()
+ uint64_t id = jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1); // TODO: use params.global_targets.size()
raw_string_ostream(gvname) << cname << id;
localname = StringRef(gvname);
}
@@ -413,9 +411,9 @@ static Constant *julia_pgv(jl_codectx_t &ctx, const char *cname, void *addr)
gv = cast_or_null<GlobalVariable>(M->getNamedValue(localname));
}
if (gv == nullptr)
- gv = new GlobalVariable(*M, ctx.types().T_pjlvalue,
+ gv = new GlobalVariable(*M, getPointerTy(M->getContext()),
false, GlobalVariable::ExternalLinkage,
- NULL, localname);
+ nullptr, localname);
// LLVM passes sometimes strip metadata when moving load around
// since the load at the new location satisfy the same condition as the original one.
// Mark the global as constant to LLVM code using our own metadata
@@ -426,7 +424,7 @@ static Constant *julia_pgv(jl_codectx_t &ctx, const char *cname, void *addr)
return gv;
}
-static Constant *julia_pgv(jl_codectx_t &ctx, const char *prefix, jl_sym_t *name, jl_module_t *mod, void *addr)
+static Constant *julia_pgv(jl_codegen_params_t &params, Module *M, const char *prefix, jl_sym_t *name, jl_module_t *mod, void *addr)
{
// emit a GlobalVariable for a jl_value_t, using the prefix, name, and module to
// to create a readable name of the form prefixModA.ModB.name#
@@ -451,53 +449,49 @@ static Constant *julia_pgv(jl_codectx_t &ctx, const char *prefix, jl_sym_t *name
finalname.resize(orig_end + prefix_name.size());
std::reverse_copy(prefix_name.begin(), prefix_name.end(), finalname.begin() + orig_end);
std::reverse(finalname.begin(), finalname.end());
- return julia_pgv(ctx, finalname.c_str(), addr);
+ return julia_pgv(params, M, finalname.c_str(), addr);
}
static JuliaVariable *julia_const_gv(jl_value_t *val);
-static Constant *literal_pointer_val_slot(jl_codectx_t &ctx, jl_value_t *p)
+Constant *literal_pointer_val_slot(jl_codegen_params_t &params, Module *M, jl_value_t *p)
{
// emit a pointer to a jl_value_t* which will allow it to be valid across reloading code
// also, try to give it a nice name for gdb, for easy identification
if (JuliaVariable *gv = julia_const_gv(p)) {
// if this is a known special object, use the existing GlobalValue
- return prepare_global_in(jl_Module, gv);
+ return prepare_global_in(M, gv);
}
if (jl_is_datatype(p)) {
jl_datatype_t *addr = (jl_datatype_t*)p;
if (addr->smalltag) {
// some common builtin datatypes have a special pool for accessing them by smalltag id
- Constant *tag = ConstantInt::get(getInt32Ty(ctx.builder.getContext()), addr->smalltag << 4);
- Constant *smallp = ConstantExpr::getInBoundsGetElementPtr(getInt8Ty(ctx.builder.getContext()), prepare_global_in(jl_Module, jl_small_typeof_var), tag);
- auto ty = ctx.types().T_ppjlvalue;
- if (ty->getPointerAddressSpace() == smallp->getType()->getPointerAddressSpace())
- return ConstantExpr::getBitCast(smallp, ty);
- else {
- Constant *newsmallp = ConstantExpr::getAddrSpaceCast(smallp, ty);
- return ConstantExpr::getBitCast(newsmallp, ty);
- }
+ Constant *tag = ConstantInt::get(getInt32Ty(M->getContext()), addr->smalltag << 4);
+ Constant *smallp = ConstantExpr::getInBoundsGetElementPtr(getInt8Ty(M->getContext()), prepare_global_in(M, jl_small_typeof_var), tag);
+ if (smallp->getType()->getPointerAddressSpace() != 0)
+ smallp = ConstantExpr::getAddrSpaceCast(smallp, getPointerTy(M->getContext()));
+ return smallp;
}
// DataTypes are prefixed with a +
- return julia_pgv(ctx, "+", addr->name->name, addr->name->module, p);
+ return julia_pgv(params, M, "+", addr->name->name, addr->name->module, p);
}
if (jl_is_method(p)) {
jl_method_t *m = (jl_method_t*)p;
// functions are prefixed with a -
- return julia_pgv(ctx, "-", m->name, m->module, p);
+ return julia_pgv(params, M, "-", m->name, m->module, p);
}
if (jl_is_method_instance(p)) {
jl_method_instance_t *linfo = (jl_method_instance_t*)p;
// Type-inferred functions are also prefixed with a -
if (jl_is_method(linfo->def.method))
- return julia_pgv(ctx, "-", linfo->def.method->name, linfo->def.method->module, p);
+ return julia_pgv(params, M, "-", linfo->def.method->name, linfo->def.method->module, p);
}
if (jl_is_symbol(p)) {
jl_sym_t *addr = (jl_sym_t*)p;
// Symbols are prefixed with jl_sym#
- return julia_pgv(ctx, "jl_sym#", addr, NULL, p);
+ return julia_pgv(params, M, "jl_sym#", addr, NULL, p);
}
// something else gets just a generic name
- return julia_pgv(ctx, "jl_global#", p);
+ return julia_pgv(params, M, "jl_global#", p);
}
static size_t dereferenceable_size(jl_value_t *jt)
@@ -570,7 +564,7 @@ static Value *literal_pointer_val(jl_codectx_t &ctx, jl_value_t *p)
{
if (p == NULL)
return Constant::getNullValue(ctx.types().T_pjlvalue);
- Value *pgv = literal_pointer_val_slot(ctx, p);
+ Value *pgv = literal_pointer_val_slot(ctx.emission_context, jl_Module, p);
jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const);
auto load = ai.decorateInst(maybe_mark_load_dereferenceable(
ctx.builder.CreateAlignedLoad(ctx.types().T_pjlvalue, pgv, Align(sizeof(void*))),
@@ -610,7 +604,7 @@ static Value *julia_binding_gv(jl_codectx_t &ctx, jl_binding_t *b)
// emit a literal_pointer_val to a jl_binding_t
// binding->value are prefixed with *
jl_globalref_t *gr = b->globalref;
- Value *pgv = gr ? julia_pgv(ctx, "*", gr->name, gr->mod, b) : julia_pgv(ctx, "*jl_bnd#", b);
+ Value *pgv = gr ? julia_pgv(ctx.emission_context, jl_Module, "*", gr->name, gr->mod, b) : julia_pgv(ctx.emission_context, jl_Module, "*jl_bnd#", b);
jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const);
auto load = ai.decorateInst(ctx.builder.CreateAlignedLoad(ctx.types().T_pjlvalue, pgv, Align(sizeof(void*))));
setName(ctx.emission_context, load, pgv->getName());
@@ -1096,7 +1090,7 @@ static void split_value_into(jl_codectx_t &ctx, const jl_cgval_t &x, Align align
return;
}
if (inline_roots_ptr == nullptr) {
- emit_unbox_store(ctx, x, dst, ctx.tbaa().tbaa_stack, align_dst, isVolatileStore);
+ emit_unbox_store(ctx, x, dst, ctx.tbaa().tbaa_stack, align_src, align_dst, isVolatileStore);
return;
}
Value *src = data_pointer(ctx, value_to_pointer(ctx, x));
@@ -1158,7 +1152,7 @@ static void split_value_into(jl_codectx_t &ctx, const jl_cgval_t &x, Align align
return;
}
if (inline_roots.empty()) {
- emit_unbox_store(ctx, x, dst, ctx.tbaa().tbaa_stack, align_dst);
+ emit_unbox_store(ctx, x, dst, ctx.tbaa().tbaa_stack, align_src, align_dst, false);
return;
}
Value *src = data_pointer(ctx, value_to_pointer(ctx, x));
@@ -1352,7 +1346,7 @@ static Value *emit_typeof(jl_codectx_t &ctx, const jl_cgval_t &p, bool maybenull
ptr = get_pointer_to_constant(ctx.emission_context, ConstantInt::get(expr_type, jt->smalltag << 4), Align(sizeof(jl_value_t*)), StringRef("_j_smalltag_") + jl_symbol_name(jt->name->name), *jl_Module);
}
else {
- ptr = ConstantExpr::getBitCast(literal_pointer_val_slot(ctx, (jl_value_t*)jt), datatype_or_p->getType());
+ ptr = ConstantExpr::getBitCast(literal_pointer_val_slot(ctx.emission_context, jl_Module, (jl_value_t*)jt), datatype_or_p->getType());
}
datatype_or_p = ctx.builder.CreateSelect(cmp, ptr, datatype_or_p);
setName(ctx.emission_context, datatype_or_p, "typetag_ptr");
@@ -1485,6 +1479,7 @@ static Value *emit_sizeof(jl_codectx_t &ctx, const jl_cgval_t &p)
return dyn_size;
}
}
+*/
static Value *emit_datatype_mutabl(jl_codectx_t &ctx, Value *dt)
{
@@ -1499,7 +1494,6 @@ static Value *emit_datatype_mutabl(jl_codectx_t &ctx, Value *dt)
mutabl = ctx.builder.CreateLShr(mutabl, 1);
return ctx.builder.CreateTrunc(mutabl, getInt1Ty(ctx.builder.getContext()));
}
-*/
static Value *emit_datatype_isprimitivetype(jl_codectx_t &ctx, Value *typ)
{
@@ -2284,15 +2278,9 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx,
const jl_cgval_t argv[3] = { cmp, lhs, rhs };
jl_cgval_t ret;
if (modifyop) {
- ret = emit_invoke(ctx, *modifyop, argv, 3, (jl_value_t*)jl_any_type, nullptr);
+ ret = emit_invoke(ctx, *modifyop, argv, 3, (jl_value_t*)jl_any_type);
}
else {
- if (trim_may_error(ctx.params->trim)) {
- // if we know the return type, we can assume the result is of that type
- errs() << "ERROR: Dynamic call to setfield/modifyfield\n";
- errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n";
- print_stacktrace(ctx, ctx.params->trim);
- }
Value *callval = emit_jlcall(ctx, jlapplygeneric_func, nullptr, argv, 3, julia_call);
ret = mark_julia_type(ctx, callval, true, jl_any_type);
}
@@ -2363,7 +2351,7 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx,
r = boxed(ctx, rhs);
}
else if (intcast) {
- emit_unbox_store(ctx, rhs, intcast, ctx.tbaa().tbaa_stack, intcast->getAlign());
+ emit_unbox_store(ctx, rhs, intcast, ctx.tbaa().tbaa_stack, MaybeAlign(), intcast->getAlign());
r = ctx.builder.CreateLoad(realelty, intcast);
}
else if (aliasscope || Order != AtomicOrdering::NotAtomic || (tracked_pointers && rhs.inline_roots.empty())) {
@@ -2401,7 +2389,7 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx,
}
else {
assert(Order == AtomicOrdering::NotAtomic && !isboxed && rhs.typ == jltype);
- emit_unbox_store(ctx, rhs, ptr, tbaa, Align(alignment));
+ emit_unbox_store(ctx, rhs, ptr, tbaa, MaybeAlign(), Align(alignment));
}
}
else if (isswapfield) {
@@ -2450,7 +2438,7 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx,
}
cmp = update_julia_type(ctx, cmp, jltype);
if (intcast) {
- emit_unbox_store(ctx, cmp, intcast, ctx.tbaa().tbaa_stack, intcast->getAlign());
+ emit_unbox_store(ctx, cmp, intcast, ctx.tbaa().tbaa_stack, MaybeAlign(), intcast->getAlign());
Compare = ctx.builder.CreateLoad(realelty, intcast);
}
else {
@@ -2521,7 +2509,7 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx,
r = boxed(ctx, rhs);
}
else if (intcast) {
- emit_unbox_store(ctx, rhs, intcast, ctx.tbaa().tbaa_stack, intcast->getAlign());
+ emit_unbox_store(ctx, rhs, intcast, ctx.tbaa().tbaa_stack, MaybeAlign(), intcast->getAlign());
r = ctx.builder.CreateLoad(realelty, intcast);
if (!tracked_pointers) // oldval is a slot, so put the oldval back
ctx.builder.CreateStore(realCompare, intcast);
@@ -2568,7 +2556,7 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx,
}
else {
assert(!isboxed && rhs.typ == jltype);
- emit_unbox_store(ctx, rhs, ptr, tbaa, Align(alignment));
+ emit_unbox_store(ctx, rhs, ptr, tbaa, MaybeAlign(), Align(alignment));
}
ctx.builder.CreateBr(DoneBB);
instr = load;
@@ -3115,11 +3103,7 @@ static jl_cgval_t emit_getfield_knownidx(jl_codectx_t &ctx, const jl_cgval_t &st
else if (strct.ispointer()) {
auto tbaa = best_field_tbaa(ctx, strct, jt, idx, byte_offset);
Value *staddr = data_pointer(ctx, strct);
- Value *addr;
- if (jl_is_vecelement_type((jl_value_t*)jt) || byte_offset == 0)
- addr = staddr; // VecElement types are unwrapped in LLVM.
- else
- addr = emit_ptrgep(ctx, staddr, byte_offset);
+ Value *addr = (byte_offset == 0 ? staddr : emit_ptrgep(ctx, staddr, byte_offset));
if (addr != staddr)
setNameWithField(ctx.emission_context, addr, get_objname, jt, idx, Twine("_ptr"));
if (jl_field_isptr(jt, idx)) {
@@ -3368,9 +3352,10 @@ static void init_bits_value(jl_codectx_t &ctx, Value *newv, Value *v, MDNode *tb
static void init_bits_cgval(jl_codectx_t &ctx, Value *newv, const jl_cgval_t &v)
{
MDNode *tbaa = jl_is_mutable(v.typ) ? ctx.tbaa().tbaa_mutab : ctx.tbaa().tbaa_immut;
- Align newv_align{std::max(julia_alignment(v.typ), (unsigned)sizeof(void*))};
+ unsigned alignment = julia_alignment(v.typ);
+ unsigned newv_align = std::max(alignment, (unsigned)sizeof(void*));
newv = maybe_decay_tracked(ctx, newv);
- emit_unbox_store(ctx, v, newv, tbaa, newv_align);
+ emit_unbox_store(ctx, v, newv, tbaa, Align(alignment), Align(newv_align));
}
static jl_value_t *static_constant_instance(const llvm::DataLayout &DL, Constant *constant, jl_value_t *jt)
@@ -3583,7 +3568,7 @@ static void union_alloca_type(jl_uniontype_t *ut,
[&](unsigned idx, jl_datatype_t *jt) {
if (!jl_is_datatype_singleton(jt)) {
size_t nb1 = jl_datatype_size(jt);
- size_t align1 = jl_datatype_align(jt);
+ size_t align1 = julia_alignment((jl_value_t*)jt);
if (nb1 > nbytes)
nbytes = nb1;
if (align1 > align)
@@ -3824,7 +3809,7 @@ static void emit_unionmove(jl_codectx_t &ctx, Value *dest, MDNode *tbaa_dst, con
if (jl_is_pointerfree(typ)) {
emit_guarded_test(ctx, skip, nullptr, [&] {
unsigned alignment = julia_alignment(typ);
- emit_unbox_store(ctx, mark_julia_const(ctx, src.constant), dest, tbaa_dst, Align(alignment), isVolatile);
+ emit_unbox_store(ctx, mark_julia_const(ctx, src.constant), dest, tbaa_dst, Align(alignment), Align(alignment), isVolatile);
return nullptr;
});
}
@@ -3834,7 +3819,7 @@ static void emit_unionmove(jl_codectx_t &ctx, Value *dest, MDNode *tbaa_dst, con
if (jl_is_pointerfree(src.typ)) {
emit_guarded_test(ctx, skip, nullptr, [&] {
unsigned alignment = julia_alignment(src.typ);
- emit_unbox_store(ctx, src, dest, tbaa_dst, Align(alignment), isVolatile);
+ emit_unbox_store(ctx, src, dest, tbaa_dst, Align(alignment), Align(alignment), isVolatile);
return nullptr;
});
}
@@ -4023,15 +4008,9 @@ static jl_cgval_t union_store(jl_codectx_t &ctx,
emit_lockstate_value(ctx, needlock, false);
const jl_cgval_t argv[3] = { cmp, oldval, rhs };
if (modifyop) {
- rhs = emit_invoke(ctx, *modifyop, argv, 3, (jl_value_t*)jl_any_type, nullptr);
+ rhs = emit_invoke(ctx, *modifyop, argv, 3, (jl_value_t*)jl_any_type);
}
else {
- if (trim_may_error(ctx.params->trim)) {
- // if we know the return type, we can assume the result is of that type
- errs() << "ERROR: Dynamic call to setfield/modifyfield\n";
- errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n";
- print_stacktrace(ctx, ctx.params->trim);
- }
Value *callval = emit_jlcall(ctx, jlapplygeneric_func, nullptr, argv, 3, julia_call);
rhs = mark_julia_type(ctx, callval, true, jl_any_type);
}
@@ -4135,10 +4114,11 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg
// choose whether we should perform the initialization with the struct as a IR value
// or instead initialize the stack buffer with stores (the later is nearly always better)
+ // although we do the former if it is a vector or could be a vector element
auto tracked = split_value_size(sty);
assert(CountTrackedPointers(lt).count == tracked.second);
bool init_as_value = false;
- if (lt->isVectorTy() || jl_is_vecelement_type(ty)) { // maybe also check the size ?
+ if (lt->isVectorTy() || jl_special_vector_alignment(1, ty) != 0) {
init_as_value = true;
}
@@ -4294,6 +4274,8 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg
}
}
else {
+ Align align_dst(jl_field_align(sty, i));
+ Align align_src(julia_alignment(jtype));
if (field_promotable) {
fval_info.V->replaceAllUsesWith(dest);
cast(fval_info.V)->eraseFromParent();
@@ -4302,10 +4284,10 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg
fval = emit_unbox(ctx, fty, fval_info, jtype);
}
else if (!roots.empty()) {
- split_value_into(ctx, fval_info, Align(julia_alignment(jtype)), dest, Align(jl_field_align(sty, i)), jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), roots);
+ split_value_into(ctx, fval_info, align_src, dest, align_dst, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), roots);
}
else {
- emit_unbox_store(ctx, fval_info, dest, ctx.tbaa().tbaa_stack, Align(jl_field_align(sty, i)));
+ emit_unbox_store(ctx, fval_info, dest, ctx.tbaa().tbaa_stack, align_src, align_dst);
}
}
if (init_as_value) {
@@ -4345,7 +4327,7 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg
if (strct) {
jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack);
promotion_point = ai.decorateInst(ctx.builder.CreateMemSet(strct, ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0),
- jl_datatype_size(ty), MaybeAlign(jl_datatype_align(ty))));
+ jl_datatype_size(ty), Align(julia_alignment(ty))));
}
ctx.builder.restoreIP(savedIP);
}
@@ -4428,7 +4410,8 @@ static int compare_cgparams(const jl_cgparams_t *a, const jl_cgparams_t *b)
(a->debug_info_kind == b->debug_info_kind) &&
(a->safepoint_on_entry == b->safepoint_on_entry) &&
(a->gcstack_arg == b->gcstack_arg) &&
- (a->use_jlplt == b->use_jlplt);
+ (a->use_jlplt == b->use_jlplt) &&
+ (a->force_emit_all == b->force_emit_all);
}
#endif
@@ -4716,10 +4699,9 @@ static jl_cgval_t emit_memoryref(jl_codectx_t &ctx, const jl_cgval_t &ref, jl_cg
setName(ctx.emission_context, ovflw, "memoryref_ovflw");
}
#endif
- boffset = ctx.builder.CreateMul(offset, elsz);
- setName(ctx.emission_context, boffset, "memoryref_byteoffset");
- newdata = ctx.builder.CreateGEP(getInt8Ty(ctx.builder.getContext()), data, boffset);
- setName(ctx.emission_context, newdata, "memoryref_data_byteoffset");
+ Type *elty = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, jl_tparam1(ref.typ));
+ newdata = ctx.builder.CreateGEP(elty, data, offset);
+ setName(ctx.emission_context, newdata, "memoryref_data_offset");
(void)boffset; // LLVM is very bad at handling GEP with types different from the load
if (bc) {
BasicBlock *failBB, *endBB;
diff --git a/src/clangsa/GCChecker.cpp b/src/clangsa/GCChecker.cpp
index fdbe5ec9d9e29..af07ca2227839 100644
--- a/src/clangsa/GCChecker.cpp
+++ b/src/clangsa/GCChecker.cpp
@@ -856,7 +856,6 @@ bool GCChecker::isGCTrackedType(QualType QT) {
Name.ends_with_insensitive("jl_stenv_t") ||
Name.ends_with_insensitive("jl_varbinding_t") ||
Name.ends_with_insensitive("set_world") ||
- Name.ends_with_insensitive("jl_ptr_kind_union_t") ||
Name.ends_with_insensitive("jl_codectx_t")) {
return true;
}
diff --git a/src/codegen-stubs.c b/src/codegen-stubs.c
index 6b547251eaab8..04f38fb9091be 100644
--- a/src/codegen-stubs.c
+++ b/src/codegen-stubs.c
@@ -17,7 +17,6 @@ JL_DLLEXPORT void jl_get_llvm_gvs_fallback(void *native_code, arraylist_t *gvs)
JL_DLLEXPORT void jl_get_llvm_external_fns_fallback(void *native_code, arraylist_t *gvs) UNAVAILABLE
JL_DLLEXPORT void jl_get_llvm_mis_fallback(void *native_code, arraylist_t* MIs) UNAVAILABLE
-JL_DLLEXPORT void jl_extern_c_fallback(jl_function_t *f, jl_value_t *rt, jl_value_t *argt, char *name) UNAVAILABLE
JL_DLLEXPORT jl_value_t *jl_dump_method_asm_fallback(jl_method_instance_t *linfo, size_t world,
char emit_mc, char getwrapper, const char* asm_variant, const char *debuginfo, char binary) UNAVAILABLE
JL_DLLEXPORT jl_value_t *jl_dump_function_ir_fallback(jl_llvmf_dump_t *dump, char strip_ir_metadata, char dump_module, const char *debuginfo) UNAVAILABLE
@@ -71,7 +70,7 @@ JL_DLLEXPORT uint32_t jl_get_LLVM_VERSION_fallback(void)
return 0;
}
-JL_DLLEXPORT int jl_compile_extern_c_fallback(LLVMOrcThreadSafeModuleRef llvmmod, void *params, void *sysimg, jl_value_t *declrt, jl_value_t *sigt)
+JL_DLLEXPORT int jl_compile_extern_c_fallback(LLVMOrcThreadSafeModuleRef llvmmod, void *params, void *sysimg, jl_value_t *name, jl_value_t *declrt, jl_value_t *sigt)
{
// Assume we were able to register the ccallable with the JIT. The
// fact that we didn't is not observable since we cannot compile
diff --git a/src/codegen.cpp b/src/codegen.cpp
index e9e4275672c7e..ec1685d9ad7c8 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -278,16 +278,16 @@ extern void _chkstk(void);
// types
struct jl_typecache_t {
- Type *T_ptr;
+ PointerType *T_ptr;
Type *T_size;
Type *T_jlvalue;
- Type *T_pjlvalue;
- Type *T_prjlvalue;
- Type *T_ppjlvalue;
- Type *T_pprjlvalue;
+ PointerType *T_pjlvalue;
+ PointerType *T_prjlvalue;
+ PointerType *T_ppjlvalue;
+ PointerType *T_pprjlvalue;
StructType *T_jlgenericmemory;
StructType *T_jlarray;
- Type *T_pjlarray;
+ PointerType *T_pjlarray;
FunctionType *T_jlfunc;
FunctionType *T_jlfuncparams;
@@ -918,16 +918,15 @@ static const auto jldeclareglobal_func = new JuliaFunction<>{
auto T_pjlvalue = JuliaType::get_pjlvalue_ty(C);
auto T_prjlvalue = JuliaType::get_prjlvalue_ty(C);
return FunctionType::get(getVoidTy(C),
- {T_pjlvalue, T_pjlvalue, T_prjlvalue}, false); },
+ {T_pjlvalue, T_pjlvalue, T_prjlvalue, getInt32Ty(C)}, false); },
nullptr,
};
-static const auto jlgetbindingorerror_func = new JuliaFunction<>{
- XSTR(jl_get_binding_or_error),
+static const auto jldepcheck_func = new JuliaFunction<>{
+ XSTR(jl_binding_deprecation_check),
[](LLVMContext &C) {
auto T_pjlvalue = JuliaType::get_pjlvalue_ty(C);
- return FunctionType::get(T_pjlvalue,
- {T_pjlvalue, T_pjlvalue}, false);
- },
+ return FunctionType::get(getVoidTy(C),
+ {T_pjlvalue}, false); },
nullptr,
};
static const auto jlcheckbpwritable_func = new JuliaFunction<>{
@@ -940,7 +939,7 @@ static const auto jlcheckbpwritable_func = new JuliaFunction<>{
nullptr,
};
static const auto jlgetbindingvalue_func = new JuliaFunction<>{
- XSTR(jl_reresolve_binding_value_seqcst),
+ XSTR(jl_get_binding_value_seqcst),
[](LLVMContext &C) {
auto T_pjlvalue = JuliaType::get_pjlvalue_ty(C);
auto T_prjlvalue = JuliaType::get_prjlvalue_ty(C);
@@ -990,20 +989,12 @@ static const auto jlapplygeneric_func = new JuliaFunction<>{
static const auto jlinvoke_func = new JuliaFunction<>{
XSTR(jl_invoke),
get_func2_sig,
- [](LLVMContext &C) { return AttributeList::get(C,
- AttributeSet(),
- Attributes(C, {Attribute::NonNull}),
- {AttributeSet(),
- Attributes(C, {Attribute::ReadOnly, Attribute::NoCapture})}); },
+ get_func_attrs,
};
static const auto jlinvokeoc_func = new JuliaFunction<>{
XSTR(jl_invoke_oc),
get_func2_sig,
- [](LLVMContext &C) { return AttributeList::get(C,
- AttributeSet(),
- Attributes(C, {Attribute::NonNull}),
- {AttributeSet(),
- Attributes(C, {Attribute::ReadOnly, Attribute::NoCapture})}); },
+ get_func_attrs,
};
static const auto jlopaque_closure_call_func = new JuliaFunction<>{
XSTR(jl_f_opaque_closure_call),
@@ -1027,7 +1018,7 @@ static const auto jlgenericfunction_func = new JuliaFunction<>{
auto T_jlvalue = JuliaType::get_jlvalue_ty(C);
auto T_pjlvalue = PointerType::get(T_jlvalue, 0);
auto T_prjlvalue = PointerType::get(T_jlvalue, AddressSpace::Tracked);
- return FunctionType::get(T_prjlvalue, {T_pjlvalue, T_pjlvalue, T_pjlvalue}, false);
+ return FunctionType::get(T_prjlvalue, {T_pjlvalue, T_pjlvalue}, false);
},
nullptr,
};
@@ -1396,6 +1387,14 @@ static const auto jlgetcfunctiontrampoline_func = new JuliaFunction<>{
Attributes(C, {Attribute::NonNull}),
None); },
};
+static const auto jlgetabiconverter_func = new JuliaFunction<TypeFnContextAndSizeT>{
+ XSTR(jl_get_abi_converter),
+ [](LLVMContext &C, Type *T_size) {
+ Type *T_ptr = getPointerTy(C);
+ return FunctionType::get(T_ptr,
+ {T_ptr, T_ptr, T_ptr, T_ptr}, false); },
+ nullptr,
+};
static const auto diff_gc_total_bytes_func = new JuliaFunction<>{
XSTR(jl_gc_diff_total_bytes),
[](LLVMContext &C) { return FunctionType::get(getInt64Ty(C), false); },
@@ -1462,7 +1461,6 @@ static const auto jlgetbuiltinfptr_func = new JuliaFunction<>{
nullptr,
};
-
// placeholder functions
static const auto gcroot_flush_func = new JuliaFunction<>{
"julia.gcroot_flush",
@@ -1596,9 +1594,8 @@ static const auto &builtin_func_map() {
{ jl_f_typeassert_addr, new JuliaFunction<>{XSTR(jl_f_typeassert), get_func_sig, get_func_attrs} },
{ jl_f_ifelse_addr, new JuliaFunction<>{XSTR(jl_f_ifelse), get_func_sig, get_func_attrs} },
{ jl_f__apply_iterate_addr, new JuliaFunction<>{XSTR(jl_f__apply_iterate), get_func_sig, get_func_attrs} },
- { jl_f__apply_pure_addr, new JuliaFunction<>{XSTR(jl_f__apply_pure), get_func_sig, get_func_attrs} },
- { jl_f__call_latest_addr, new JuliaFunction<>{XSTR(jl_f__call_latest), get_func_sig, get_func_attrs} },
- { jl_f__call_in_world_addr, new JuliaFunction<>{XSTR(jl_f__call_in_world), get_func_sig, get_func_attrs} },
+ { jl_f_invokelatest_addr, new JuliaFunction<>{XSTR(jl_f_invokelatest), get_func_sig, get_func_attrs} },
+ { jl_f_invoke_in_world_addr, new JuliaFunction<>{XSTR(jl_f_invoke_in_world), get_func_sig, get_func_attrs} },
{ jl_f__call_in_world_total_addr, new JuliaFunction<>{XSTR(jl_f__call_in_world_total), get_func_sig, get_func_attrs} },
{ jl_f_throw_addr, new JuliaFunction<>{XSTR(jl_f_throw), get_func_sig, get_func_attrs} },
{ jl_f_throw_methoderror_addr, new JuliaFunction<>{XSTR(jl_f_throw_methoderror), get_func_sig, get_func_attrs} },
@@ -1634,17 +1631,6 @@ static const auto &builtin_func_map() {
return *builtins;
}
-static const auto &may_dispatch_builtins() {
- static auto builtins = new DenseSet<jl_fptr_args_t>(
- {jl_f__apply_iterate_addr,
- jl_f__apply_pure_addr,
- jl_f__call_in_world_addr,
- jl_f__call_in_world_total_addr,
- jl_f__call_latest_addr,
- });
- return *builtins;
-}
-
static const auto jl_new_opaque_closure_jlcall_func = new JuliaFunction<>{XSTR(jl_new_opaque_closure_jlcall), get_func_sig, get_func_attrs};
static _Atomic(uint64_t) globalUniqueGeneratedNames{1};
@@ -1671,7 +1657,7 @@ static MDNode *best_tbaa(jl_tbaacache_t &tbaa_cache, jl_value_t *jt) {
// note that this includes jl_isbits, although codegen should work regardless
static bool jl_is_concrete_immutable(jl_value_t* t)
{
- return jl_is_immutable_datatype(t) && ((jl_datatype_t*)t)->isconcretetype;
+ return jl_may_be_immutable_datatype(t) && ((jl_datatype_t*)t)->isconcretetype;
}
static bool jl_is_pointerfree(jl_value_t* t)
@@ -2095,7 +2081,7 @@ jl_aliasinfo_t jl_aliasinfo_t::fromTBAA(jl_codectx_t &ctx, MDNode *tbaa) {
}
static Type *julia_type_to_llvm(jl_codectx_t &ctx, jl_value_t *jt, bool *isboxed = NULL);
-static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value *fval, StringRef name, jl_value_t *sig, jl_value_t *jlrettype, bool is_opaque_closure, bool gcstack_arg,
+static jl_returninfo_t get_specsig_function(jl_codegen_params_t &ctx, Module *M, Value *fval, StringRef name, jl_value_t *sig, jl_value_t *jlrettype, bool is_opaque_closure,
ArrayRef<const char*> ArgNames=None, unsigned nreq=0);
static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaval = -1);
static jl_cgval_t emit_checked_var(jl_codectx_t &ctx, Value *bp, jl_sym_t *name, jl_value_t *scope, bool isvol, MDNode *tbaa);
@@ -2107,195 +2093,19 @@ static Value *get_tls_world_age(jl_codectx_t &ctx);
static Value *get_scope_field(jl_codectx_t &ctx);
static Value *get_tls_world_age_field(jl_codectx_t &ctx);
static void CreateTrap(IRBuilder<> &irbuilder, bool create_new_block = true);
-static CallInst *emit_jlcall(jl_codectx_t &ctx, FunctionCallee theFptr, Value *theF,
+static CallInst *emit_jlcall(jl_codectx_t &ctx, Value *theFptr, Value *theF,
ArrayRef<jl_cgval_t> args, size_t nargs, JuliaFunction<> *trampoline);
static CallInst *emit_jlcall(jl_codectx_t &ctx, JuliaFunction<> *theFptr, Value *theF,
ArrayRef<jl_cgval_t> args, size_t nargs, JuliaFunction<> *trampoline);
static Value *emit_f_is(jl_codectx_t &ctx, const jl_cgval_t &arg1, const jl_cgval_t &arg2,
Value *nullcheck1 = nullptr, Value *nullcheck2 = nullptr);
static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t nargs, ArrayRef<jl_cgval_t> argv, bool is_promotable=false);
-static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayRef<jl_cgval_t> argv, size_t nargs, jl_value_t *rt, Value *age_ok);
+static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayRef<jl_cgval_t> argv, size_t nargs, jl_value_t *rt);
static Value *literal_pointer_val(jl_codectx_t &ctx, jl_value_t *p);
static unsigned julia_alignment(jl_value_t *jt);
static void recombine_value(jl_codectx_t &ctx, const jl_cgval_t &x, Value *dst, jl_aliasinfo_t const &dst_ai, Align alignment, bool isVolatile);
-static void print_stack_crumbs(jl_codectx_t &ctx)
-{
- errs() << "\n";
- errs() << "Stacktrace:\n";
- jl_method_instance_t *caller = ctx.linfo;
- jl_((jl_value_t*)caller);
- errs() << "In " << ctx.file << ":" << ctx.line << "\n";
- while (true) {
- auto it = ctx.emission_context.enqueuers.find(caller);
- if (it != ctx.emission_context.enqueuers.end()) {
- caller = std::get(it->second);
- } else {
- break;
- }
- if (caller) {
- if (jl_is_method_instance(caller)) {
- for (auto it2 = std::get(it->second).begin(); it2 != (std::prev(std::get(it->second).end())); ++it2) {
- auto frame = *it2;
- errs() << std::get<0>(frame) << " \n";
- errs() << "In " << std::get<1>(frame) << ":" << std::get(frame) << "\n";
- }
- auto &frame = std::get(it->second).front();
- jl_((jl_value_t*)caller);
- errs() << "In " << std::get<1>(frame) << ":" << std::get(frame) << "\n";
- }
- }
- else
- break;
- }
- abort();
-}
-
-static jl_value_t *StackFrame(
- jl_value_t *linfo,
- std::string fn_name,
- std::string filepath,
- int32_t lineno,
- jl_value_t *inlined)
-{
- jl_value_t *StackFrame = jl_get_global(jl_base_module, jl_symbol("StackFrame"));
- assert(StackFrame != nullptr);
-
- jl_value_t *args[7] = {
- /* func */ (jl_value_t *)jl_symbol(fn_name.c_str()),
- /* line */ (jl_value_t *)jl_symbol(filepath.c_str()),
- /* line */ jl_box_int32(lineno),
- /* linfo */ (jl_value_t *)linfo,
- /* from_c */ jl_false,
- /* inlined */ inlined,
- /* pointer */ jl_box_uint64(0)
- };
-
- jl_value_t *frame = nullptr;
- JL_TRY {
- frame = jl_apply_generic(StackFrame, args, 7);
- } JL_CATCH {
- jl_safe_printf("Error creating stack frame\n");
- }
- return frame;
-}
-
-static void push_frames(jl_codectx_t &ctx, jl_method_instance_t *caller, jl_method_instance_t *callee)
-{
- CallFrames frames;
- auto it = ctx.emission_context.enqueuers.find(callee);
- if (it != ctx.emission_context.enqueuers.end())
- return;
- auto DL = ctx.builder.getCurrentDebugLocation();
- if (caller == nullptr || !DL) { // Used in various places
- frames.push_back({ctx.funcName, "", 0});
- ctx.emission_context.enqueuers.insert({callee, {caller, std::move(frames)}});
- return;
- }
- auto filename = std::string(DL->getFilename());
- auto line = DL->getLine();
- auto fname = std::string(DL->getScope()->getSubprogram()->getName());
- frames.push_back({fname, filename, line});
- auto DI = DL.getInlinedAt();
- while (DI) {
- auto filename = std::string(DI->getFilename());
- auto line = DI->getLine();
- auto fname = std::string(DI->getScope()->getSubprogram()->getName());
- frames.push_back({fname, filename, line});
- DI = DI->getInlinedAt();
- }
- ctx.emission_context.enqueuers.insert({callee, {caller, std::move(frames)}});
-}
-
-static jl_array_t* build_stack_crumbs(jl_codectx_t &ctx) JL_NOTSAFEPOINT
-{
- static intptr_t counter = 5;
- jl_method_instance_t *caller = (jl_method_instance_t*)counter; //nothing serves as a sentinel for the bottom for the stack
- push_frames(ctx, ctx.linfo, (jl_method_instance_t*)caller);
- counter++;
- jl_array_t *out = jl_alloc_array_1d(jl_array_any_type, 0);
- JL_GC_PUSH1(&out);
- while (true) {
- auto it = ctx.emission_context.enqueuers.find(caller);
- if (it != ctx.emission_context.enqueuers.end()) {
- caller = std::get(it->second);
- } else {
- break;
- }
- if (caller) {
-
- // assert(ctx.emission_context.enqueuers.count(caller) == 1);
- // Each enqueuer should only be enqueued at least once and only once. Check why this assert is triggering
- // This isn't a fatal error, just means that we may get a wrong backtrace
- if (jl_is_method_instance(caller)) {
- //TODO: Use a subrange when C++20 is a thing
- for (auto it2 = std::get(it->second).begin(); it2 != (std::prev(std::get(it->second).end())); ++it2) {
- auto frame = *it2;
- jl_value_t *stackframe = StackFrame(jl_nothing, std::get<0>(frame), std::get<1>(frame), std::get(frame), jl_true);
- if (stackframe == nullptr)
- print_stack_crumbs(ctx);
- jl_array_ptr_1d_push(out, stackframe);
- }
- auto &frame = std::get(it->second).back();
- jl_value_t *stackframe = StackFrame((jl_value_t *)caller, std::get<0>(frame), std::get<1>(frame), std::get(frame), jl_false);
- if (stackframe == nullptr)
- print_stack_crumbs(ctx);
- jl_array_ptr_1d_push(out, stackframe);
- }
- }
- else
- break;
- }
- JL_GC_POP();
- return out;
-}
-
-static void print_stacktrace(jl_codectx_t &ctx, int trim)
-{
- jl_task_t *ct = jl_get_current_task();
- assert(ct);
-
- // Temporarily operate in the current age
- size_t last_age = ct->world_age;
- ct->world_age = jl_get_world_counter();
- jl_array_t* bt = build_stack_crumbs(ctx);
- JL_GC_PUSH1(&bt);
-
- // Call `reinit_stdio` to get TTY IO objects (w/ color)
- jl_value_t *reinit_stdio = jl_get_global(jl_base_module, jl_symbol("_reinit_stdio"));
- assert(reinit_stdio);
- jl_apply_generic(reinit_stdio, nullptr, 0);
-
- // Show the backtrace
- jl_value_t *show_backtrace = jl_get_global(jl_base_module, jl_symbol("show_backtrace"));
- jl_value_t *base_stderr = jl_get_global(jl_base_module, jl_symbol("stderr"));
- assert(show_backtrace && base_stderr);
-
- JL_TRY {
- jl_value_t *args[2] = { base_stderr, (jl_value_t *)bt };
- jl_apply_generic(show_backtrace, args, 2);
- } JL_CATCH {
- jl_printf(JL_STDERR,"Error showing backtrace\n");
- print_stack_crumbs(ctx);
- }
-
- jl_printf(JL_STDERR, "\n\n");
- JL_GC_POP();
- ct->world_age = last_age;
-
- if (trim == JL_TRIM_SAFE) {
- jl_printf(JL_STDERR,"Aborting compilation due to finding a dynamic dispatch");
- exit(1);
- }
- return;
-}
-
-static int trim_may_error(int trim)
-{
- return (trim == JL_TRIM_SAFE) || (trim == JL_TRIM_UNSAFE_WARN);
-}
-
static GlobalVariable *prepare_global_in(Module *M, JuliaVariable *G)
{
return G->realize(M);
@@ -2914,7 +2724,7 @@ static jl_cgval_t convert_julia_type(jl_codectx_t &ctx, const jl_cgval_t &v, jl_
return jl_cgval_t(v, typ, new_tindex);
}
-std::unique_ptr jl_create_llvm_module(StringRef name, LLVMContext &context, const DataLayout &DL, const Triple &triple)
+std::unique_ptr jl_create_llvm_module(StringRef name, LLVMContext &context, const DataLayout &DL, const Triple &triple) JL_NOTSAFEPOINT
{
++ModulesCreated;
auto m = std::make_unique(name, context);
@@ -2941,14 +2751,16 @@ std::unique_ptr jl_create_llvm_module(StringRef name, LLVMContext &conte
return m;
}
-static void jl_name_jlfunc_args(jl_codegen_params_t ¶ms, Function *F) {
+static void jl_name_jlfunc_args(jl_codegen_params_t ¶ms, Function *F) JL_NOTSAFEPOINT
+{
assert(F->arg_size() == 3);
F->getArg(0)->setName("function::Core.Function");
F->getArg(1)->setName("args::Any[]");
F->getArg(2)->setName("nargs::UInt32");
}
-static void jl_name_jlfuncparams_args(jl_codegen_params_t ¶ms, Function *F) {
+static void jl_name_jlfuncparams_args(jl_codegen_params_t ¶ms, Function *F) JL_NOTSAFEPOINT
+{
assert(F->arg_size() == 4);
F->getArg(0)->setName("function::Core.Function");
F->getArg(1)->setName("args::Any[]");
@@ -2956,7 +2768,7 @@ static void jl_name_jlfuncparams_args(jl_codegen_params_t ¶ms, Function *F)
F->getArg(3)->setName("sparams::Any");
}
-void jl_init_function(Function *F, const Triple &TT)
+void jl_init_function(Function *F, const Triple &TT) JL_NOTSAFEPOINT
{
// set any attributes that *must* be set on all functions
AttrBuilder attr(F->getContext());
@@ -3020,6 +2832,7 @@ static bool uses_specsig(jl_value_t *sig, bool needsparams, jl_value_t *rettype,
bool allSingleton = true;
for (size_t i = 0; i < jl_nparams(sig); i++) {
jl_value_t *sigt = jl_tparam(sig, i);
+ // TODO: sigt = unwrap_va(sigt)
bool issing = jl_is_datatype(sigt) && jl_is_datatype_singleton((jl_datatype_t*)sigt);
allSingleton &= issing;
if (!deserves_argbox(sigt) && !issing) {
@@ -3090,20 +2903,6 @@ static void mallocVisitLine(jl_codectx_t &ctx, StringRef filename, int line, Val
// --- constant determination ---
-static void show_source_loc(jl_codectx_t &ctx, JL_STREAM *out)
-{
- jl_printf(out, "in %s at %s", ctx.name, ctx.file.str().c_str());
-}
-
-static void cg_bdw(jl_codectx_t &ctx, jl_sym_t *var, jl_binding_t *b)
-{
- jl_binding_deprecation_warning(ctx.module, var, b);
- if (b->deprecated == 1 && jl_options.depwarn) {
- show_source_loc(ctx, JL_STDERR);
- jl_printf(JL_STDERR, "\n");
- }
-}
-
static jl_value_t *static_apply_type(jl_codectx_t &ctx, ArrayRef args, size_t nargs)
{
assert(nargs > 1);
@@ -3128,17 +2927,26 @@ static jl_value_t *static_apply_type(jl_codectx_t &ctx, ArrayRef arg
return result;
}
+static void emit_depwarn_check(jl_codectx_t &ctx, jl_binding_t *b)
+{
+ Value *bp = julia_binding_gv(ctx, b);
+ ctx.builder.CreateCall(prepare_call(jldepcheck_func), { bp });
+}
+
// try to statically evaluate, NULL if not possible. note that this may allocate, and as
// such the resulting value should not be embedded directly in the generated code.
static jl_value_t *static_eval(jl_codectx_t &ctx, jl_value_t *ex)
{
if (jl_is_symbol(ex)) {
jl_sym_t *sym = (jl_sym_t*)ex;
- jl_binding_t *bnd = jl_get_module_binding(ctx.module, sym, 0);
- jl_binding_partition_t *bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world);
- jl_ptr_kind_union_t pku = jl_walk_binding_inplace_all(&bnd, &bpart, ctx.min_world, ctx.max_world);
- if (jl_bkind_is_some_constant(decode_restriction_kind(pku)))
- return decode_restriction_value(pku);
+ jl_binding_t *bnd = jl_get_module_binding(ctx.module, sym, 1);
+ int possibly_deprecated = 0;
+ jl_value_t *cval = jl_get_binding_leaf_partitions_value_if_const(bnd, &possibly_deprecated, ctx.min_world, ctx.max_world);
+ if (cval) {
+ if (possibly_deprecated)
+ emit_depwarn_check(ctx, bnd);
+ return cval;
+ }
return NULL;
}
if (jl_is_slotnumber(ex) || jl_is_argument(ex))
@@ -3159,15 +2967,12 @@ static jl_value_t *static_eval(jl_codectx_t &ctx, jl_value_t *ex)
jl_sym_t *s = NULL;
if (jl_is_globalref(ex)) {
s = jl_globalref_name(ex);
- jl_binding_t *bnd = jl_get_module_binding(jl_globalref_mod(ex), s, 0);
- jl_binding_partition_t *bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world);
- jl_ptr_kind_union_t pku = jl_walk_binding_inplace_all(&bnd, &bpart, ctx.min_world, ctx.max_world);
- jl_value_t *v = NULL;
- if (jl_bkind_is_some_constant(decode_restriction_kind(pku)))
- v = decode_restriction_value(pku);
+ jl_binding_t *bnd = jl_get_module_binding(jl_globalref_mod(ex), s, 1);
+ int possibly_deprecated = 0;
+ jl_value_t *v = jl_get_binding_leaf_partitions_value_if_const(bnd, &possibly_deprecated, ctx.min_world, ctx.max_world);
if (v) {
- if (bnd->deprecated)
- cg_bdw(ctx, s, bnd);
+ if (possibly_deprecated)
+ emit_depwarn_check(ctx, bnd);
return v;
}
return NULL;
@@ -3186,15 +2991,12 @@ static jl_value_t *static_eval(jl_codectx_t &ctx, jl_value_t *ex)
// Assumes that the module is rooted somewhere.
s = (jl_sym_t*)static_eval(ctx, jl_exprarg(e, 2));
if (s && jl_is_symbol(s)) {
- jl_binding_t *bnd = jl_get_module_binding(m, s, 0);
- jl_binding_partition_t *bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world);
- jl_ptr_kind_union_t pku = jl_walk_binding_inplace_all(&bnd, &bpart, ctx.min_world, ctx.max_world);
- jl_value_t *v = NULL;
- if (jl_bkind_is_some_constant(decode_restriction_kind(pku)))
- v = decode_restriction_value(pku);
+ jl_binding_t *bnd = jl_get_module_binding(m, s, 1);
+ int possibly_deprecated = 0;
+ jl_value_t *v = jl_get_binding_leaf_partitions_value_if_const(bnd, &possibly_deprecated, ctx.min_world, ctx.max_world);
if (v) {
- if (bnd->deprecated)
- cg_bdw(ctx, s, bnd);
+ if (possibly_deprecated)
+ emit_depwarn_check(ctx, bnd);
return v;
}
}
@@ -3255,7 +3057,6 @@ static bool slot_eq(jl_value_t *e, int sl)
// --- find volatile variables ---
// assigned in a try block and used outside that try block
-
static bool local_var_occurs(jl_value_t *e, int sl)
{
if (slot_eq(e, sl)) {
@@ -3295,13 +3096,13 @@ static bool have_try_block(jl_array_t *stmts)
return 0;
}
-// conservative marking of all variables potentially used after a catch block that were assigned before it
+// conservative marking of all variables potentially used after a catch block that were assigned after the try
static void mark_volatile_vars(jl_array_t *stmts, SmallVectorImpl &slots, const std::set &bbstarts)
{
if (!have_try_block(stmts))
return;
size_t slength = jl_array_dim0(stmts);
- BitVector assigned_in_block(slots.size()); // conservatively only ignore slots assigned in the same basic block
+ BitVector assigned_in_block(slots.size()); // since we don't have domtree access, conservatively only ignore slots assigned in the same basic block
for (int j = 0; j < (int)slength; j++) {
if (bbstarts.count(j + 1))
assigned_in_block.reset();
@@ -3436,59 +3237,36 @@ static jl_cgval_t emit_globalref_runtime(jl_codectx_t &ctx, jl_binding_t *bnd, j
static jl_cgval_t emit_globalref(jl_codectx_t &ctx, jl_module_t *mod, jl_sym_t *name, AtomicOrdering order)
{
jl_binding_t *bnd = jl_get_module_binding(mod, name, 1);
- assert(bnd);
- jl_binding_partition_t *bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world);
- if (!bpart) {
+ struct restriction_kind_pair rkp = { NULL, NULL, PARTITION_KIND_GUARD, 0 };
+ if (!jl_get_binding_leaf_partitions_restriction_kind(bnd, &rkp, ctx.min_world, ctx.max_world)) {
return emit_globalref_runtime(ctx, bnd, mod, name);
}
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) {
- // try to look this up now.
- // TODO: This is bad and we'd like to delete it.
- jl_get_binding(mod, name);
- }
- // bpart was updated in place - this will change with full partition
- pku = jl_atomic_load_acquire(&bpart->restriction);
- if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) {
- // Redo the lookup at runtime
- return emit_globalref_runtime(ctx, bnd, mod, name);
- } else {
- while (true) {
- if (!bpart)
- break;
- if (!jl_bkind_is_some_import(decode_restriction_kind(pku)))
- break;
- if (bnd->deprecated) {
- cg_bdw(ctx, name, bnd);
- }
- bnd = (jl_binding_t*)decode_restriction_value(pku);
- bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world);
- if (!bpart)
- break;
- pku = jl_atomic_load_acquire(&bpart->restriction);
+ if (jl_bkind_is_some_constant(rkp.kind) && rkp.kind != PARTITION_KIND_BACKDATED_CONST) {
+ if (rkp.maybe_depwarn) {
+ Value *bp = julia_binding_gv(ctx, bnd);
+ ctx.builder.CreateCall(prepare_call(jldepcheck_func), { bp });
}
- enum jl_partition_kind kind = decode_restriction_kind(pku);
- if (bpart && (jl_bkind_is_some_constant(kind) && kind != BINDING_KIND_BACKDATED_CONST)) {
- jl_value_t *constval = decode_restriction_value(pku);
- if (!constval) {
- undef_var_error_ifnot(ctx, ConstantInt::get(getInt1Ty(ctx.builder.getContext()), 0), name, (jl_value_t*)mod);
- return jl_cgval_t();
- }
- return mark_julia_const(ctx, constval);
+ jl_value_t *constval = rkp.restriction;
+ if (!constval) {
+ undef_var_error_ifnot(ctx, ConstantInt::get(getInt1Ty(ctx.builder.getContext()), 0), name, (jl_value_t*)mod);
+ return jl_cgval_t();
}
+ return mark_julia_const(ctx, constval);
}
- if (!bpart || decode_restriction_kind(pku) != BINDING_KIND_GLOBAL) {
+ if (rkp.kind != PARTITION_KIND_GLOBAL) {
return emit_globalref_runtime(ctx, bnd, mod, name);
}
Value *bp = julia_binding_gv(ctx, bnd);
- if (bnd->deprecated) {
- cg_bdw(ctx, name, bnd);
+ if (rkp.maybe_depwarn) {
+ ctx.builder.CreateCall(prepare_call(jldepcheck_func), { bp });
}
- jl_value_t *ty = decode_restriction_value(pku);
- bp = julia_binding_pvalue(ctx, bp);
+ if (bnd != rkp.binding_if_global)
+ bp = julia_binding_gv(ctx, rkp.binding_if_global);
+ jl_value_t *ty = rkp.restriction;
+ Value *bpval = julia_binding_pvalue(ctx, bp);
if (ty == nullptr)
ty = (jl_value_t*)jl_any_type;
- return update_julia_type(ctx, emit_checked_var(ctx, bp, name, (jl_value_t*)mod, false, ctx.tbaa().tbaa_binding), ty);
+ return update_julia_type(ctx, emit_checked_var(ctx, bpval, name, (jl_value_t*)mod, false, ctx.tbaa().tbaa_binding), ty);
}
static jl_cgval_t emit_globalop(jl_codectx_t &ctx, jl_module_t *mod, jl_sym_t *sym, jl_cgval_t rval, const jl_cgval_t &cmp,
@@ -3500,9 +3278,9 @@ static jl_cgval_t emit_globalop(jl_codectx_t &ctx, jl_module_t *mod, jl_sym_t *s
jl_binding_partition_t *bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world);
Value *bp = julia_binding_gv(ctx, bnd);
if (bpart) {
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- if (decode_restriction_kind(pku) == BINDING_KIND_GLOBAL) {
- jl_value_t *ty = decode_restriction_value(pku);
+ if (jl_binding_kind(bpart) == PARTITION_KIND_GLOBAL) {
+ int possibly_deprecated = bpart->kind & PARTITION_FLAG_DEPWARN;
+ jl_value_t *ty = bpart->restriction;
if (ty != nullptr) {
const std::string fname = issetglobal ? "setglobal!" : isreplaceglobal ? "replaceglobal!" : isswapglobal ? "swapglobal!" : ismodifyglobal ? "modifyglobal!" : "setglobalonce!";
if (!ismodifyglobal) {
@@ -3514,6 +3292,9 @@ static jl_cgval_t emit_globalop(jl_codectx_t &ctx, jl_module_t *mod, jl_sym_t *s
}
bool isboxed = true;
bool maybe_null = jl_atomic_load_relaxed(&bnd->value) == NULL;
+ if (possibly_deprecated) {
+ ctx.builder.CreateCall(prepare_call(jldepcheck_func), { bp });
+ }
return typed_store(ctx,
julia_binding_pvalue(ctx, bp),
rval, cmp, ty,
@@ -3647,61 +3428,6 @@ static Value *emit_bitsunion_compare(jl_codectx_t &ctx, const jl_cgval_t &arg1,
return phi;
}
-struct egal_desc {
- size_t offset;
- size_t nrepeats;
- size_t data_bytes;
- size_t padding_bytes;
-};
-
-template
-static size_t emit_masked_bits_compare(callback &emit_desc, jl_datatype_t *aty, egal_desc ¤t_desc)
-{
- // Memcmp, but with masked padding
- size_t data_bytes = 0;
- size_t padding_bytes = 0;
- size_t nfields = jl_datatype_nfields(aty);
- size_t total_size = jl_datatype_size(aty);
- assert(aty->layout->flags.isbitsegal);
- for (size_t i = 0; i < nfields; ++i) {
- size_t offset = jl_field_offset(aty, i);
- size_t fend = i == nfields - 1 ? total_size : jl_field_offset(aty, i + 1);
- size_t fsz = jl_field_size(aty, i);
- jl_datatype_t *fty = (jl_datatype_t*)jl_field_type(aty, i);
- assert(jl_is_datatype(fty)); // union fields should never reach here
- assert(fty->layout->flags.isbitsegal);
- if (jl_field_isptr(aty, i) || !fty->layout->flags.haspadding) {
- // The field has no internal padding
- data_bytes += fsz;
- if (offset + fsz == fend) {
- // The field has no padding after. Merge this into the current
- // comparison range and go to next field.
- } else {
- padding_bytes = fend - offset - fsz;
- // Found padding. Either merge this into the current comparison
- // range, or emit the old one and start a new one.
- if (current_desc.data_bytes == data_bytes &&
- current_desc.padding_bytes == padding_bytes) {
- // Same as the previous range, just note that down, so we
- // emit this as a loop.
- current_desc.nrepeats += 1;
- } else {
- if (current_desc.nrepeats != 0)
- emit_desc(current_desc);
- current_desc.nrepeats = 1;
- current_desc.data_bytes = data_bytes;
- current_desc.padding_bytes = padding_bytes;
- }
- data_bytes = 0;
- }
- } else {
- // The field may have internal padding. Recurse this.
- data_bytes += emit_masked_bits_compare(emit_desc, fty, current_desc);
- }
- }
- return data_bytes;
-}
-
static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t arg2)
{
++EmittedBitsCompares;
@@ -3778,92 +3504,6 @@ static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t a
}
return ctx.builder.CreateICmpEQ(answer, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), 0));
}
- else if (sz > 512 && jl_struct_try_layout(sty) && sty->layout->flags.isbitsegal) {
- Value *varg1 = arg1.inline_roots.empty() && arg1.ispointer() ? data_pointer(ctx, arg1) :
- value_to_pointer(ctx, arg1).V;
- Value *varg2 = arg2.inline_roots.empty() && arg2.ispointer() ? data_pointer(ctx, arg2) :
- value_to_pointer(ctx, arg2).V;
- varg1 = emit_pointer_from_objref(ctx, varg1);
- varg2 = emit_pointer_from_objref(ctx, varg2);
-
- // See above for why we want to do this
- SmallVector gc_uses;
- gc_uses.append(get_gc_roots_for(ctx, arg1));
- gc_uses.append(get_gc_roots_for(ctx, arg2));
- OperandBundleDef OpBundle("jl_roots", gc_uses);
-
- Value *answer = nullptr;
- auto emit_desc = [&](egal_desc desc) {
- Value *ptr1 = varg1;
- Value *ptr2 = varg2;
- if (desc.offset != 0) {
- ptr1 = emit_ptrgep(ctx, ptr1, desc.offset);
- ptr2 = emit_ptrgep(ctx, ptr2, desc.offset);
- }
-
- Value *new_ptr1 = ptr1;
- Value *endptr1 = nullptr;
- BasicBlock *postBB = nullptr;
- BasicBlock *loopBB = nullptr;
- PHINode *answerphi = nullptr;
- if (desc.nrepeats != 1) {
- // Set up loop
- endptr1 = emit_ptrgep(ctx, ptr1, desc.nrepeats * (desc.data_bytes + desc.padding_bytes));;
-
- BasicBlock *currBB = ctx.builder.GetInsertBlock();
- loopBB = BasicBlock::Create(ctx.builder.getContext(), "egal_loop", ctx.f);
- postBB = BasicBlock::Create(ctx.builder.getContext(), "post", ctx.f);
- ctx.builder.CreateBr(loopBB);
-
- ctx.builder.SetInsertPoint(loopBB);
- Type *TInt1 = getInt1Ty(ctx.builder.getContext());
- answerphi = ctx.builder.CreatePHI(TInt1, 2);
- answerphi->addIncoming(answer ? answer : ConstantInt::get(TInt1, 1), currBB);
- answer = answerphi;
-
- PHINode *itr1 = ctx.builder.CreatePHI(ptr1->getType(), 2);
- PHINode *itr2 = ctx.builder.CreatePHI(ptr2->getType(), 2);
-
- new_ptr1 = emit_ptrgep(ctx, itr1, desc.data_bytes + desc.padding_bytes);
- itr1->addIncoming(ptr1, currBB);
- itr1->addIncoming(new_ptr1, loopBB);
-
- Value *new_ptr2 = emit_ptrgep(ctx, itr2, desc.data_bytes + desc.padding_bytes);
- itr2->addIncoming(ptr2, currBB);
- itr2->addIncoming(new_ptr2, loopBB);
-
- ptr1 = itr1;
- ptr2 = itr2;
- }
-
- // Emit memcmp. TODO: LLVM has a pass to expand this for additional
- // performance.
- Value *this_answer = ctx.builder.CreateCall(prepare_call(memcmp_func),
- { ptr1,
- ptr2,
- ConstantInt::get(ctx.types().T_size, desc.data_bytes) },
- ArrayRef(&OpBundle, gc_uses.empty() ? 0 : 1));
- this_answer = ctx.builder.CreateICmpEQ(this_answer, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), 0));
- answer = answer ? ctx.builder.CreateAnd(answer, this_answer) : this_answer;
- if (endptr1) {
- answerphi->addIncoming(answer, loopBB);
- Value *loopend = ctx.builder.CreateICmpEQ(new_ptr1, endptr1);
- ctx.builder.CreateCondBr(loopend, postBB, loopBB);
- ctx.builder.SetInsertPoint(postBB);
- }
- };
- egal_desc current_desc = {0};
- size_t trailing_data_bytes = emit_masked_bits_compare(emit_desc, sty, current_desc);
- assert(current_desc.nrepeats != 0);
- emit_desc(current_desc);
- if (trailing_data_bytes != 0) {
- current_desc.nrepeats = 1;
- current_desc.data_bytes = trailing_data_bytes;
- current_desc.padding_bytes = 0;
- emit_desc(current_desc);
- }
- return answer;
- }
else {
jl_svec_t *types = sty->types;
Value *answer = ConstantInt::get(getInt1Ty(ctx.builder.getContext()), 1);
@@ -4184,30 +3824,29 @@ static bool emit_f_opfield(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f,
static jl_cgval_t emit_isdefinedglobal(jl_codectx_t &ctx, jl_module_t *modu, jl_sym_t *name, int allow_import, enum jl_memory_order order)
{
- Value *isnull = NULL;
jl_binding_t *bnd = allow_import ? jl_get_binding(modu, name) : jl_get_module_binding(modu, name, 0);
- jl_binding_partition_t *bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world);
- jl_ptr_kind_union_t pku = bpart ? jl_atomic_load_relaxed(&bpart->restriction) : encode_restriction(NULL, BINDING_KIND_GUARD);
- if (decode_restriction_kind(pku) == BINDING_KIND_GLOBAL || jl_bkind_is_some_constant(decode_restriction_kind(pku))) {
- if (jl_get_binding_value_if_const(bnd))
+ struct restriction_kind_pair rkp = { NULL, NULL, PARTITION_KIND_GUARD, 0 };
+ if (allow_import && jl_get_binding_leaf_partitions_restriction_kind(bnd, &rkp, ctx.min_world, ctx.max_world)) {
+ if (jl_bkind_is_some_constant(rkp.kind) && rkp.restriction)
return mark_julia_const(ctx, jl_true);
- Value *bp = julia_binding_gv(ctx, bnd);
- bp = julia_binding_pvalue(ctx, bp);
- LoadInst *v = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, bp, Align(sizeof(void*)));
- jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_binding);
- ai.decorateInst(v);
- v->setOrdering(get_llvm_atomic_order(order));
- isnull = ctx.builder.CreateICmpNE(v, Constant::getNullValue(ctx.types().T_prjlvalue));
- }
- else {
- Value *v = ctx.builder.CreateCall(prepare_call(jlboundp_func), {
- literal_pointer_val(ctx, (jl_value_t*)modu),
- literal_pointer_val(ctx, (jl_value_t*)name),
- ConstantInt::get(getInt32Ty(ctx.builder.getContext()), allow_import)
- });
- isnull = ctx.builder.CreateICmpNE(v, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), 0));
- }
- return mark_julia_type(ctx, isnull, false, jl_bool_type);
+ if (rkp.kind == PARTITION_KIND_GLOBAL) {
+ Value *bp = julia_binding_gv(ctx, rkp.binding_if_global);
+ bp = julia_binding_pvalue(ctx, bp);
+ LoadInst *v = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, bp, Align(sizeof(void*)));
+ jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_binding);
+ ai.decorateInst(v);
+ v->setOrdering(get_llvm_atomic_order(order));
+ Value *isnull = ctx.builder.CreateICmpNE(v, Constant::getNullValue(ctx.types().T_prjlvalue));
+ return mark_julia_type(ctx, isnull, false, jl_bool_type);
+ }
+ }
+ Value *isdef = ctx.builder.CreateCall(prepare_call(jlboundp_func), {
+ literal_pointer_val(ctx, (jl_value_t*)modu),
+ literal_pointer_val(ctx, (jl_value_t*)name),
+ ConstantInt::get(getInt32Ty(ctx.builder.getContext()), allow_import)
+ });
+ isdef = ctx.builder.CreateTrunc(isdef, getInt1Ty(ctx.builder.getContext()));
+ return mark_julia_type(ctx, isdef, false, jl_bool_type);
}
static bool emit_f_opmemory(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f,
@@ -4473,12 +4112,6 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f,
Value *theArgs = emit_ptrgep(ctx, ctx.argArray, ctx.nReqArgs * sizeof(jl_value_t*));
Value *r = ctx.builder.CreateCall(prepare_call(jlapplygeneric_func), { theF, theArgs, nva });
*ret = mark_julia_type(ctx, r, true, jl_any_type);
- if (trim_may_error(ctx.params->trim)) {
- // if we know the return type, we can assume the result is of that type
- errs() << "ERROR: Dynamic call to Core._apply_iterate detected\n";
- errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n";
- print_stacktrace(ctx, ctx.params->trim);
- }
return true;
}
}
@@ -5311,14 +4944,14 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f,
}
// Returns ctx.types().T_prjlvalue
-static CallInst *emit_jlcall(jl_codectx_t &ctx, FunctionCallee theFptr, Value *theF,
+static CallInst *emit_jlcall(jl_codectx_t &ctx, Value *theFptr, Value *theF,
ArrayRef argv, size_t nargs, JuliaFunction<> *trampoline)
{
++EmittedJLCalls;
Function *TheTrampoline = prepare_call(trampoline);
// emit arguments
SmallVector theArgs;
- theArgs.push_back(theFptr.getCallee());
+ theArgs.push_back(theFptr);
if (theF)
theArgs.push_back(theF);
for (size_t i = 0; i < nargs; i++) {
@@ -5485,12 +5118,11 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos
}
static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_closure, jl_value_t *specTypes, jl_value_t *jlretty, llvm::Value *callee, StringRef specFunctionObject, jl_code_instance_t *fromexternal,
- ArrayRef argv, size_t nargs, jl_returninfo_t::CallingConv *cc, unsigned *nreturn_roots, jl_value_t *inferred_retty, Value *age_ok)
+ ArrayRef argv, size_t nargs, jl_returninfo_t::CallingConv *cc, unsigned *nreturn_roots, jl_value_t *inferred_retty)
{
++EmittedSpecfunCalls;
// emit specialized call site
- bool gcstack_arg = JL_FEAT_TEST(ctx, gcstack_arg);
- jl_returninfo_t returninfo = get_specsig_function(ctx, jl_Module, callee, specFunctionObject, specTypes, jlretty, is_opaque_closure, gcstack_arg);
+ jl_returninfo_t returninfo = get_specsig_function(ctx.emission_context, jl_Module, callee, specFunctionObject, specTypes, jlretty, is_opaque_closure);
*cc = returninfo.cc;
*nreturn_roots = returninfo.return_roots;
if (fromexternal) {
@@ -5510,31 +5142,17 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos
setName(ctx.emission_context, TheCallee, namep);
returninfo.decl = FunctionCallee(returninfo.decl.getFunctionType(), TheCallee);
}
- if (age_ok) {
- std::string funcName(specFunctionObject);
- funcName += "_gfthunk";
- Function *gf_thunk = Function::Create(returninfo.decl.getFunctionType(),
- GlobalVariable::InternalLinkage, funcName, jl_Module);
- jl_init_function(gf_thunk, ctx.emission_context.TargetTriple);
- gf_thunk->setAttributes(AttributeList::get(gf_thunk->getContext(), {returninfo.attrs, gf_thunk->getAttributes()}));
- // build a specsig -> jl_apply_generic converter thunk
- // this builds a method that calls jl_apply_generic (as a closure over a singleton function pointer),
- // but which has the signature of a specsig
- emit_specsig_to_fptr1(gf_thunk, returninfo.cc, returninfo.return_roots, specTypes, jlretty, is_opaque_closure, nargs, ctx.emission_context,
- prepare_call(jlapplygeneric_func));
- returninfo.decl = FunctionCallee(returninfo.decl.getFunctionType(), ctx.builder.CreateSelect(age_ok, returninfo.decl.getCallee(), gf_thunk));
- }
jl_cgval_t retval = emit_call_specfun_other(ctx, is_opaque_closure, specTypes, jlretty, returninfo, argv, nargs);
// see if inference has a different / better type for the call than the lambda
return update_julia_type(ctx, retval, inferred_retty);
}
static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, jl_method_instance_t *mi, jl_value_t *jlretty, StringRef specFunctionObject, jl_code_instance_t *fromexternal,
- ArrayRef argv, size_t nargs, jl_returninfo_t::CallingConv *cc, unsigned *return_roots, jl_value_t *inferred_retty, Value *age_ok)
+ ArrayRef argv, size_t nargs, jl_returninfo_t::CallingConv *cc, unsigned *return_roots, jl_value_t *inferred_retty)
{
bool is_opaque_closure = jl_is_method(mi->def.value) && mi->def.method->is_for_opaque_closure;
return emit_call_specfun_other(ctx, is_opaque_closure, mi->specTypes, jlretty, NULL,
- specFunctionObject, fromexternal, argv, nargs, cc, return_roots, inferred_retty, age_ok);
+ specFunctionObject, fromexternal, argv, nargs, cc, return_roots, inferred_retty);
}
static jl_value_t *get_ci_abi(jl_code_instance_t *ci)
@@ -5545,16 +5163,16 @@ static jl_value_t *get_ci_abi(jl_code_instance_t *ci)
}
static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, jl_code_instance_t *ci, StringRef specFunctionObject, jl_code_instance_t *fromexternal,
- ArrayRef argv, size_t nargs, jl_returninfo_t::CallingConv *cc, unsigned *return_roots, jl_value_t *inferred_retty, Value *age_ok)
+ ArrayRef argv, size_t nargs, jl_returninfo_t::CallingConv *cc, unsigned *return_roots, jl_value_t *inferred_retty)
{
jl_method_instance_t *mi = jl_get_ci_mi(ci);
bool is_opaque_closure = jl_is_method(mi->def.value) && mi->def.method->is_for_opaque_closure;
return emit_call_specfun_other(ctx, is_opaque_closure, get_ci_abi(ci), ci->rettype, NULL,
- specFunctionObject, fromexternal, argv, nargs, cc, return_roots, inferred_retty, age_ok);
+ specFunctionObject, fromexternal, argv, nargs, cc, return_roots, inferred_retty);
}
static jl_cgval_t emit_call_specfun_boxed(jl_codectx_t &ctx, jl_value_t *jlretty, StringRef specFunctionObject, jl_code_instance_t *fromexternal,
- ArrayRef argv, size_t nargs, jl_value_t *inferred_retty, Value *age_ok)
+ ArrayRef argv, size_t nargs, jl_value_t *inferred_retty)
{
Value *theFptr;
if (fromexternal) {
@@ -5577,9 +5195,7 @@ static jl_cgval_t emit_call_specfun_boxed(jl_codectx_t &ctx, jl_value_t *jlretty
theFptr = jl_Module->getOrInsertFunction(specFunctionObject, ctx.types().T_jlfunc).getCallee();
addRetAttr(cast(theFptr), Attribute::NonNull);
}
- if (age_ok)
- theFptr = ctx.builder.CreateSelect(age_ok, theFptr, prepare_call(jlapplygeneric_func));
- Value *ret = emit_jlcall(ctx, FunctionCallee(ctx.types().T_jlfunc, theFptr), nullptr, argv, nargs, julia_call);
+ Value *ret = emit_jlcall(ctx, theFptr, nullptr, argv, nargs, julia_call);
return update_julia_type(ctx, mark_julia_type(ctx, ret, true, jlretty), inferred_retty);
}
@@ -5597,10 +5213,10 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt)
if (argv[i].typ == jl_bottom_type)
return jl_cgval_t();
}
- return emit_invoke(ctx, lival, argv, nargs, rt, nullptr);
+ return emit_invoke(ctx, lival, argv, nargs, rt);
}
-static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayRef argv, size_t nargs, jl_value_t *rt, Value *age_ok)
+static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayRef argv, size_t nargs, jl_value_t *rt)
{
++EmittedInvokes;
bool handled = false;
@@ -5633,7 +5249,7 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR
unsigned return_roots = 0;
jl_returninfo_t::CallingConv cc = jl_returninfo_t::CallingConv::Boxed;
StringRef protoname = f->getName();
- result = emit_call_specfun_other(ctx, mi, ctx.rettype, protoname, nullptr, argv, nargs, &cc, &return_roots, rt, age_ok);
+ result = emit_call_specfun_other(ctx, mi, ctx.rettype, protoname, nullptr, argv, nargs, &cc, &return_roots, rt);
}
handled = true;
}
@@ -5649,8 +5265,6 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR
bool specsig, needsparams;
std::tie(specsig, needsparams) = uses_specsig(get_ci_abi(codeinst), mi, codeinst->rettype, ctx.params->prefer_specsig);
if (needsparams) {
- if (trim_may_error(ctx.params->trim))
- push_frames(ctx, ctx.linfo, mi);
Value *r = emit_jlcall(ctx, jlinvoke_func, track_pjlvalue(ctx, literal_pointer_val(ctx, (jl_value_t*)mi)), argv, nargs, julia_call2);
result = mark_julia_type(ctx, r, true, rt);
}
@@ -5699,14 +5313,12 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR
jl_returninfo_t::CallingConv cc = jl_returninfo_t::CallingConv::Boxed;
unsigned return_roots = 0;
if (specsig)
- result = emit_call_specfun_other(ctx, codeinst, protoname, external ? codeinst : nullptr, argv, nargs, &cc, &return_roots, rt, age_ok);
+ result = emit_call_specfun_other(ctx, codeinst, protoname, external ? codeinst : nullptr, argv, nargs, &cc, &return_roots, rt);
else
- result = emit_call_specfun_boxed(ctx, codeinst->rettype, protoname, external ? codeinst : nullptr, argv, nargs, rt, age_ok);
+ result = emit_call_specfun_boxed(ctx, codeinst->rettype, protoname, external ? codeinst : nullptr, argv, nargs, rt);
if (need_to_emit) {
Function *trampoline_decl = cast(jl_Module->getNamedValue(protoname));
ctx.call_targets[codeinst] = {cc, return_roots, trampoline_decl, nullptr, specsig};
- if (trim_may_error(ctx.params->trim))
- push_frames(ctx, ctx.linfo, mi);
}
}
}
@@ -5715,19 +5327,8 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR
}
}
if (!handled) {
- if (trim_may_error(ctx.params->trim)) {
- if (lival.constant) {
- push_frames(ctx, ctx.linfo, (jl_method_instance_t*)lival.constant);
- }
- else {
- errs() << "Dynamic call to unknown function";
- errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n";
-
- print_stacktrace(ctx, ctx.params->trim);
- }
- }
- Value *r = age_ok ? emit_jlcall(ctx, jlapplygeneric_func, nullptr, argv, nargs, julia_call) : emit_jlcall(ctx, jlinvoke_func, boxed(ctx, lival), argv, nargs, julia_call2);
- result = mark_julia_type(ctx, r, true, age_ok ? (jl_value_t*)jl_any_type : rt);
+ Value *r = emit_jlcall(ctx, jlinvoke_func, boxed(ctx, lival), argv, nargs, julia_call2);
+ result = mark_julia_type(ctx, r, true, rt);
}
if (result.typ == jl_bottom_type) {
#ifndef JL_NDEBUG
@@ -5785,12 +5386,6 @@ static jl_cgval_t emit_invoke_modify(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_
return mark_julia_type(ctx, oldnew, true, rt);
}
}
- if (trim_may_error(ctx.params->trim)) {
- errs() << "ERROR: dynamic invoke modify call to";
- jl_(args[0]);
- errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n";
- print_stacktrace(ctx, ctx.params->trim);
- }
// emit function and arguments
Value *callval = emit_jlcall(ctx, jlapplygeneric_func, nullptr, argv, nargs, julia_call);
return mark_julia_type(ctx, callval, true, rt);
@@ -5818,9 +5413,10 @@ static jl_cgval_t emit_specsig_oc_call(jl_codectx_t &ctx, jl_value_t *oc_type, j
jl_cgval_t &theArg = argv[0];
jl_cgval_t closure_specptr = emit_getfield_knownidx(ctx, theArg, 4, (jl_datatype_t*)oc_type, jl_memory_order_notatomic);
Value *specptr = emit_unbox(ctx, ctx.types().T_size, closure_specptr, (jl_value_t*)jl_long_type);
+ specptr = emit_inttoptr(ctx, specptr, ctx.types().T_ptr);
JL_GC_PUSH1(&sigtype);
jl_cgval_t r = emit_call_specfun_other(ctx, true, sigtype, oc_rett, specptr, "", NULL, argv, nargs,
- &cc, &return_roots, oc_rett, nullptr);
+ &cc, &return_roots, oc_rett);
JL_GC_POP();
return r;
}
@@ -5864,55 +5460,21 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo
// special case for some known builtin not handled by emit_builtin_call
auto it = builtin_func_map().find(builtin_fptr);
if (it != builtin_func_map().end()) {
- if (trim_may_error(ctx.params->trim)) {
- bool may_dispatch = may_dispatch_builtins().count(builtin_fptr);
- if (may_dispatch && f.constant == jl_builtin__apply_iterate && nargs >= 4) {
- if (jl_subtype(argv[2].typ, (jl_value_t*)jl_builtin_type)) {
- static jl_value_t *jl_dispatchfree_apply_iterate_type = NULL;
- if (!jl_dispatchfree_apply_iterate_type) {
- jl_value_t *types[5] = {
- (jl_value_t *)jl_simplevector_type,
- (jl_value_t *)jl_genericmemory_type,
- (jl_value_t *)jl_array_type,
- (jl_value_t *)jl_tuple_type,
- (jl_value_t *)jl_namedtuple_type,
- };
- jl_dispatchfree_apply_iterate_type = jl_as_global_root(jl_type_union(types, 5), 1);
- }
- for (size_t i = 3; i < nargs; i++) {
- auto ai = argv[i].typ;
- if (!jl_subtype(ai, jl_dispatchfree_apply_iterate_type))
- break;
- }
- may_dispatch = false;
- }
- }
- if (may_dispatch) {
- errs() << "ERROR: Dynamic call to builtin " << jl_symbol_name(((jl_datatype_t*)jl_typeof(f.constant))->name->name);
- errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n";
- print_stacktrace(ctx, ctx.params->trim);
- }
- }
Value *ret = emit_jlcall(ctx, it->second, Constant::getNullValue(ctx.types().T_prjlvalue), ArrayRef(argv).drop_front(), nargs - 1, julia_call);
setName(ctx.emission_context, ret, it->second->name + "_ret");
return mark_julia_type(ctx, ret, true, rt);
}
}
- FunctionCallee fptr;
+ Value *fptr;
JuliaFunction<> *cc;
if (f.typ == (jl_value_t*)jl_intrinsic_type) {
fptr = prepare_call(jlintrinsic_func);
cc = julia_call3;
}
else {
- fptr = FunctionCallee(get_func_sig(ctx.builder.getContext()), ctx.builder.CreateCall(prepare_call(jlgetbuiltinfptr_func), {emit_typeof(ctx, f)}));
+ fptr = ctx.builder.CreateCall(prepare_call(jlgetbuiltinfptr_func), {emit_typeof(ctx, f)});
cc = julia_call;
}
- if (trim_may_error(ctx.params->trim)) {
- errs() << "ERROR: Dynamic call to unknown builtin";
- errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n";
- print_stacktrace(ctx, ctx.params->trim);
- }
Value *ret = emit_jlcall(ctx, fptr, nullptr, argv, nargs, cc);
setName(ctx.emission_context, ret, "Builtin_ret");
return mark_julia_type(ctx, ret, true, rt);
@@ -5933,39 +5495,6 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo
// TODO: else emit_oc_call
}
}
- int failed_dispatch = !argv[0].constant;
- if (ctx.params->trim != JL_TRIM_NO) {
- // TODO: Implement the last-minute call resolution that used to be here
- // in inference instead.
- }
-
- if (failed_dispatch && trim_may_error(ctx.params->trim)) {
- errs() << "Dynamic call to ";
- jl_jmp_buf *old_buf = jl_get_safe_restore();
- jl_jmp_buf buf;
- jl_set_safe_restore(&buf);
- if (!jl_setjmp(buf, 0)) {
- jl_static_show((JL_STREAM*)STDERR_FILENO, (jl_value_t*)args[0]);
- jl_printf((JL_STREAM*)STDERR_FILENO,"(");
- for (size_t i = 1; i < nargs; ++i) {
- jl_value_t *typ = argv[i].typ;
- if (!jl_is_concrete_type(typ)) // Print type in red
- jl_printf((JL_STREAM*)STDERR_FILENO, "\x1b[31m");
- jl_static_show((JL_STREAM*)STDERR_FILENO, (jl_value_t*)argv[i].typ);
- if (!jl_is_concrete_type(typ))
- jl_printf((JL_STREAM*)STDERR_FILENO, "\x1b[0m");
- if (i != nargs-1)
- jl_printf((JL_STREAM*)STDERR_FILENO,", ");
- }
- jl_printf((JL_STREAM*)STDERR_FILENO,")\n");
- }
- else {
- jl_printf((JL_STREAM*)STDERR_FILENO, "\n!!! ERROR while printing error -- ABORTING !!!\n");
- }
- jl_set_safe_restore(old_buf);
- errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n";
- print_stacktrace(ctx, ctx.params->trim);
- }
// emit function and arguments
Value *callval = emit_jlcall(ctx, jlapplygeneric_func, nullptr, argv, n_generic_args, julia_call);
return mark_julia_type(ctx, callval, true, rt);
@@ -6201,7 +5730,7 @@ static void emit_vi_assignment_unboxed(jl_codectx_t &ctx, jl_varinfo_t &vi, Valu
if (vi.inline_roots)
split_value_into(ctx, rval_info, align, vi.value.V, align, jl_aliasinfo_t::fromTBAA(ctx, tbaa), vi.inline_roots, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe), vi.isVolatile);
else
- emit_unbox_store(ctx, rval_info, vi.value.V, tbaa, align, vi.isVolatile);
+ emit_unbox_store(ctx, rval_info, vi.value.V, tbaa, align, align, vi.isVolatile);
}
}
}
@@ -6723,8 +6252,7 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met
bool is_opaque_closure = jl_is_method(mi->def.value) && mi->def.method->is_for_opaque_closure;
assert(is_opaque_closure);
if (specsig) {
- bool gcstack_arg = JL_FEAT_TEST(ctx, gcstack_arg);
- jl_returninfo_t returninfo = get_specsig_function(ctx, jl_Module, nullptr, protoname, mi->specTypes, rettype, is_opaque_closure, gcstack_arg);
+ jl_returninfo_t returninfo = get_specsig_function(ctx.emission_context, jl_Module, nullptr, protoname, mi->specTypes, rettype, is_opaque_closure);
cc = returninfo.cc;
return_roots = returninfo.return_roots;
specF = cast(returninfo.decl.getCallee());
@@ -6883,8 +6411,6 @@ static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_
jl_value_t *mn = args[0];
assert(jl_is_symbol(mn) || jl_is_slotnumber(mn) || jl_is_globalref(mn));
- Value *bp = NULL, *name;
- jl_binding_t *bnd = NULL;
bool issym = jl_is_symbol(mn);
bool isglobalref = !issym && jl_is_globalref(mn);
jl_module_t *mod = ctx.module;
@@ -6893,26 +6419,11 @@ static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_
mod = jl_globalref_mod(mn);
mn = (jl_value_t*)jl_globalref_name(mn);
}
- JL_TRY {
- if (jl_symbol_name((jl_sym_t*)mn)[0] == '@')
- jl_errorf("macro definition not allowed inside a local scope");
- name = literal_pointer_val(ctx, mn);
- bnd = jl_get_binding_for_method_def(mod, (jl_sym_t*)mn);
- }
- JL_CATCH {
- jl_value_t *e = jl_current_exception(jl_current_task);
- // errors. boo. :(
- JL_GC_PUSH1(&e);
- e = jl_as_global_root(e, 1);
- JL_GC_POP();
- raise_exception(ctx, literal_pointer_val(ctx, e));
- return ghostValue(ctx, jl_nothing_type);
- }
- bp = julia_binding_gv(ctx, bnd);
jl_cgval_t gf = mark_julia_type(
ctx,
- ctx.builder.CreateCall(prepare_call(jlgenericfunction_func), { bp,
- literal_pointer_val(ctx, (jl_value_t*)mod), name
+ ctx.builder.CreateCall(prepare_call(jlgenericfunction_func), {
+ literal_pointer_val(ctx, (jl_value_t*)mod),
+ literal_pointer_val(ctx, (jl_value_t*)mn)
}),
true,
jl_function_type);
@@ -6958,16 +6469,21 @@ static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_
}
}
else if (head == jl_globaldecl_sym) {
- assert(nargs == 2);
+ assert(nargs <= 2 && nargs >= 1);
jl_sym_t *sym = (jl_sym_t*)args[0];
jl_module_t *mod = ctx.module;
if (jl_is_globalref(sym)) {
mod = jl_globalref_mod(sym);
sym = jl_globalref_name(sym);
}
- jl_cgval_t typ = emit_expr(ctx, args[1]);
- ctx.builder.CreateCall(prepare_call(jldeclareglobal_func),
- { literal_pointer_val(ctx, (jl_value_t*)mod), literal_pointer_val(ctx, (jl_value_t*)sym), boxed(ctx, typ) });
+ if (nargs == 2) {
+ jl_cgval_t typ = emit_expr(ctx, args[1]);
+ ctx.builder.CreateCall(prepare_call(jldeclareglobal_func),
+ { literal_pointer_val(ctx, (jl_value_t*)mod), literal_pointer_val(ctx, (jl_value_t*)sym), boxed(ctx, typ), ConstantInt::get(getInt32Ty(ctx.builder.getContext()), 1) });
+ } else {
+ ctx.builder.CreateCall(prepare_call(jldeclareglobal_func),
+ { literal_pointer_val(ctx, (jl_value_t*)mod), literal_pointer_val(ctx, (jl_value_t*)sym), ConstantPointerNull::get(cast(ctx.types().T_prjlvalue)), ConstantInt::get(getInt32Ty(ctx.builder.getContext()), 1) });
+ }
}
else if (head == jl_new_sym) {
bool is_promotable = false;
@@ -7029,12 +6545,6 @@ static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_
((jl_method_t*)source.constant)->nargs > 0 &&
jl_is_valid_oc_argtype((jl_tupletype_t*)argt.constant, (jl_method_t*)source.constant);
- if (!can_optimize && trim_may_error(ctx.params->trim)) {
- // if we know the return type, we can assume the result is of that type
- errs() << "ERROR: Dynamic call to OpaqueClosure method\n";
- errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n";
- print_stacktrace(ctx, ctx.params->trim);
- }
if (can_optimize) {
jl_value_t *closure_t = NULL;
@@ -7192,7 +6702,7 @@ JL_GCC_IGNORE_STOP
// --- generate function bodies ---
// gc frame emission
-static void allocate_gc_frame(jl_codectx_t &ctx, BasicBlock *b0, bool or_new=false)
+static void allocate_gc_frame(jl_codectx_t &ctx, BasicBlock *b0, bool or_new=false) JL_NOTSAFEPOINT
{
// allocate a placeholder gc instruction
// this will require the runtime, but it gets deleted later if unused
@@ -7244,12 +6754,34 @@ static Value *get_scope_field(jl_codectx_t &ctx)
return emit_ptrgep(ctx, ct, offsetof(jl_task_t, scope), "scope");
}
+static std::string get_function_name(bool specsig, bool needsparams, const char *unadorned_name, const Triple &TargetTriple)
+{
+ std::string _funcName;
+ raw_string_ostream funcName(_funcName);
+ // try to avoid conflicts in the global symbol table
+ if (specsig)
+ funcName << "julia_"; // api 5
+ else if (needsparams)
+ funcName << "japi3_";
+ else
+ funcName << "japi1_";
+ if (TargetTriple.isOSLinux()) {
+ if (unadorned_name[0] == '@')
+ unadorned_name++;
+ }
+ funcName << unadorned_name << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1);
+ return funcName.str();
+}
+
+static void gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *abi, jl_value_t *jlretty, jl_value_t *declrt, jl_returninfo_t &f, unsigned nargs, int retarg, bool is_opaque_closure, StringRef funcName,
+ Module *M, jl_codegen_params_t ¶ms);
+
Function *get_or_emit_fptr1(StringRef preal_decl, Module *M)
{
return cast(M->getOrInsertFunction(preal_decl, get_func_sig(M->getContext()), get_func_attrs(M->getContext())).getCallee());
}
-Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptrName, Module *M, jl_codegen_params_t ¶ms) JL_NOTSAFEPOINT
+Function *emit_tojlinvoke(jl_code_instance_t *codeinst, Value *theFunc, Module *M, jl_codegen_params_t ¶ms) JL_NOTSAFEPOINT
{
++EmittedToJLInvokes;
jl_codectx_t ctx(M->getContext(), params, codeinst);
@@ -7259,20 +6791,14 @@ Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptrName, M
GlobalVariable::InternalLinkage,
name, M);
jl_init_function(f, params.TargetTriple);
- if (trim_may_error(params.params->trim)) {
- push_frames(ctx, ctx.linfo, jl_get_ci_mi(codeinst));
- }
jl_name_jlfunc_args(params, f);
//f->setAlwaysInline();
ctx.f = f; // for jl_Module
- BasicBlock *b0 = BasicBlock::Create(ctx.builder.getContext(), "top", f);
+ BasicBlock *b0 = BasicBlock::Create(M->getContext(), "top", f);
ctx.builder.SetInsertPoint(b0);
- Function *theFunc;
Value *theFarg;
- if (!theFptrName.empty()) {
- theFunc = cast(
- M->getOrInsertFunction(theFptrName, jlinvoke_func->_type(ctx.builder.getContext())).getCallee());
+ if (theFunc) {
theFarg = literal_pointer_val(ctx, (jl_value_t*)codeinst);
}
else {
@@ -7283,12 +6809,20 @@ Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptrName, M
}
theFarg = track_pjlvalue(ctx, theFarg);
auto args = f->arg_begin();
- CallInst *r = ctx.builder.CreateCall(theFunc, { &*args, &*++args, &*++args, theFarg });
- r->setAttributes(theFunc->getAttributes());
+ CallInst *r = ctx.builder.CreateCall(FunctionCallee(jlinvoke_func->_type(M->getContext()), theFunc), { &*args, &*++args, &*++args, theFarg });
+ r->setAttributes(jlinvoke_func->_attrs(M->getContext()));
ctx.builder.CreateRet(r);
return f;
}
+Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptrName, Module *M, jl_codegen_params_t ¶ms) JL_NOTSAFEPOINT
+{
+ Value *theFunc = nullptr;
+ if (!theFptrName.empty())
+ theFunc = M->getOrInsertFunction(theFptrName, jlinvoke_func->_type(M->getContext()), jlinvoke_func->_attrs(M->getContext())).getCallee();
+ return emit_tojlinvoke(codeinst, theFunc, M, params);
+}
+
static jl_value_t *get_oc_type(jl_value_t *calltype, jl_value_t *rettype) JL_ALWAYS_LEAFTYPE
{
jl_value_t *argtype = jl_argtype_without_function((jl_value_t*)calltype);
@@ -7299,12 +6833,16 @@ static jl_value_t *get_oc_type(jl_value_t *calltype, jl_value_t *rettype) JL_ALW
return oc_type;
}
-void emit_specsig_to_fptr1(
+static void emit_specsig_to_specsig(
Function *gf_thunk, jl_returninfo_t::CallingConv cc, unsigned return_roots,
jl_value_t *calltype, jl_value_t *rettype, bool is_for_opaque_closure,
size_t nargs,
jl_codegen_params_t ¶ms,
- Function *target)
+ Value *target,
+ jl_value_t *targetsig,
+ jl_value_t *targetrt,
+ jl_returninfo_t *targetspec,
+ jl_value_t *rettype_const)
{
++EmittedCFuncInvalidates;
jl_codectx_t ctx(gf_thunk->getParent()->getContext(), params, 0, 0);
@@ -7321,7 +6859,7 @@ void emit_specsig_to_fptr1(
++AI;
if (return_roots)
++AI;
- if (JL_FEAT_TEST(ctx,gcstack_arg)){
+ if (JL_FEAT_TEST(ctx,gcstack_arg)) {
++AI; // gcstack_arg
}
for (size_t i = 0; i < nargs; i++) {
@@ -7372,16 +6910,23 @@ void emit_specsig_to_fptr1(
}
}
assert(AI == gf_thunk->arg_end());
- Value *gf_ret = emit_jlcall(ctx, target, nullptr, myargs, nargs, julia_call);
- jl_cgval_t gf_retbox = mark_julia_type(ctx, gf_ret, true, jl_any_type);
- if (cc != jl_returninfo_t::Boxed) {
- emit_typecheck(ctx, gf_retbox, rettype, "cfunction");
- gf_retbox = update_julia_type(ctx, gf_retbox, rettype);
+ jl_cgval_t gf_retval;
+ if (target || targetspec) {
+ if (targetspec == nullptr)
+ gf_retval = mark_julia_type(ctx, emit_jlcall(ctx, target, nullptr, myargs, nargs, julia_call), true, targetrt);
+ else
+ gf_retval = emit_call_specfun_other(ctx, is_for_opaque_closure, targetsig, targetrt, *targetspec, myargs, nargs);
+ }
+ if (rettype_const)
+ gf_retval = mark_julia_const(ctx, rettype_const);
+ if (targetrt != rettype) {
+ emit_typecheck(ctx, gf_retval, rettype, "cfunction");
+ gf_retval = update_julia_type(ctx, gf_retval, rettype);
}
switch (cc) {
case jl_returninfo_t::Boxed:
- ctx.builder.CreateRet(gf_ret);
+ ctx.builder.CreateRet(boxed(ctx, gf_retval));
break;
case jl_returninfo_t::Register: {
Type *gfrt = gf_thunk->getReturnType();
@@ -7389,7 +6934,7 @@ void emit_specsig_to_fptr1(
ctx.builder.CreateRetVoid();
}
else {
- ctx.builder.CreateRet(ctx.builder.CreateAlignedLoad(gfrt, gf_ret, Align(julia_alignment(rettype))));
+ ctx.builder.CreateRet(emit_unbox(ctx, gfrt, gf_retval, rettype));
}
break;
}
@@ -7398,65 +6943,273 @@ void emit_specsig_to_fptr1(
Align align(julia_alignment(rettype));
if (return_roots) {
Value *roots = gf_thunk->arg_begin() + 1; // root1 has type [n x {}*]*
- split_value_into(ctx, gf_retbox, align, sret, align, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), roots, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe));
+ split_value_into(ctx, gf_retval, align, sret, align, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), roots, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe));
}
else {
- emit_unbox_store(ctx, gf_retbox, sret, ctx.tbaa().tbaa_stack, align);
+ emit_unbox_store(ctx, gf_retval, sret, ctx.tbaa().tbaa_stack, align, align);
}
ctx.builder.CreateRetVoid();
break;
}
case jl_returninfo_t::Union: {
+ Value *gf_ret = boxed(ctx, gf_retval); // TODO: this is not the most optimal way to emit this
Type *retty = gf_thunk->getReturnType();
- Value *gf_retval = UndefValue::get(retty);
- Value *tindex = compute_box_tindex(ctx, emit_typeof(ctx, gf_retbox, false, true), (jl_value_t*)jl_any_type, rettype);
+ Value *retval = UndefValue::get(retty);
+ Value *tindex = compute_box_tindex(ctx, emit_typeof(ctx, gf_retval, false, true), (jl_value_t*)jl_any_type, rettype);
tindex = ctx.builder.CreateOr(tindex, ConstantInt::get(getInt8Ty(ctx.builder.getContext()), UNION_BOX_MARKER));
- gf_retval = ctx.builder.CreateInsertValue(gf_retval, gf_ret, 0);
- gf_retval = ctx.builder.CreateInsertValue(gf_retval, tindex, 1);
- ctx.builder.CreateRet(gf_retval);
+ retval = ctx.builder.CreateInsertValue(retval, gf_ret, 0);
+ retval = ctx.builder.CreateInsertValue(retval, tindex, 1);
+ ctx.builder.CreateRet(retval);
break;
}
case jl_returninfo_t::Ghosts: {
- Value *gf_retval = compute_tindex_unboxed(ctx, gf_retbox, rettype);
- ctx.builder.CreateRet(gf_retval);
+ Value *retval = compute_tindex_unboxed(ctx, gf_retval, rettype);
+ ctx.builder.CreateRet(retval);
break;
}
}
}
+void emit_specsig_to_fptr1(
+ Function *gf_thunk, jl_returninfo_t::CallingConv cc, unsigned return_roots,
+ jl_value_t *calltype, jl_value_t *rettype, bool is_for_opaque_closure,
+ size_t nargs,
+ jl_codegen_params_t ¶ms,
+ Function *target)
+{
+ emit_specsig_to_specsig(gf_thunk, cc, return_roots, calltype, rettype, is_for_opaque_closure, nargs, params, target, calltype, rettype, nullptr, nullptr);
+}
+
+static void emit_fptr1_wrapper(Module *M, StringRef gf_thunk_name, Value *target, jl_value_t *rettype_const, jl_value_t *declrt, jl_value_t *jlrettype, jl_codegen_params_t ¶ms)
+{
+ Function *w = Function::Create(get_func_sig(M->getContext()), GlobalVariable::ExternalLinkage, gf_thunk_name, M);
+ jl_init_function(w, params.TargetTriple);
+ w->setAttributes(AttributeList::get(M->getContext(), {get_func_attrs(M->getContext()), w->getAttributes()}));
+ w->addFnAttr(Attribute::OptimizeNone);
+ w->addFnAttr(Attribute::NoInline);
+
+ jl_codectx_t ctx(M->getContext(), params, 0, 0);
+ ctx.f = w;
+ ctx.rettype = declrt;
+
+ BasicBlock *b0 = BasicBlock::Create(ctx.builder.getContext(), "top", w);
+ ctx.builder.SetInsertPoint(b0);
+ DebugLoc noDbg;
+ ctx.builder.SetCurrentDebugLocation(noDbg);
+ allocate_gc_frame(ctx, b0);
+
+ jl_cgval_t gf_retval;
+ if (target) {
+ FunctionCallee theFunc(w->getFunctionType(), target);
+ auto args = w->arg_begin();
+ CallInst *r = ctx.builder.CreateCall(theFunc, { &*args, &*++args, &*++args }); // cf emit_tojlinvoke
+ assert(++args == w->arg_end());
+ r->setAttributes(w->getAttributes());
+ gf_retval = mark_julia_type(ctx, r, true, jlrettype);
+ }
+ if (rettype_const)
+ gf_retval = mark_julia_const(ctx, rettype_const);
+ if (jlrettype != declrt)
+ emit_typecheck(ctx, gf_retval, declrt, "cfunction");
+ ctx.builder.CreateRet(boxed(ctx, gf_retval));
+}
+
+static void emit_specsig_to_specsig(
+ Module *M, StringRef gf_thunk_name,
+ jl_value_t *calltype, jl_value_t *rettype, bool is_for_opaque_closure,
+ size_t nargs,
+ jl_codegen_params_t ¶ms,
+ Value *target,
+ jl_value_t *targetsig,
+ jl_value_t *targetrt,
+ jl_returninfo_t *targetspec,
+ jl_value_t *rettype_const)
+{
+ jl_returninfo_t returninfo = get_specsig_function(params, M, nullptr, gf_thunk_name, calltype, rettype, is_for_opaque_closure);
+ Function *gf_thunk = cast(returninfo.decl.getCallee());
+ jl_init_function(gf_thunk, params.TargetTriple);
+ gf_thunk->setAttributes(AttributeList::get(gf_thunk->getContext(), {returninfo.attrs, gf_thunk->getAttributes()}));
+ emit_specsig_to_specsig(gf_thunk, returninfo.cc, returninfo.return_roots, calltype, rettype, is_for_opaque_closure, nargs, params, target, targetsig, targetrt, targetspec, rettype_const);
+}
+
+std::string emit_abi_converter(Module *M, jl_codegen_params_t ¶ms, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, bool specsig, jl_code_instance_t *codeinst, Value *target, bool target_specsig)
+{
+ // this builds a method that calls a method with the same arguments but a different specsig
+ // build a specsig -> specsig converter thunk
+ // build a specsig -> arg1 converter thunk
+ // build a args1 -> specsig converter thunk (gen_invoke_wrapper)
+ // build a args1 -> args1 converter thunk (to add typeassert on result)
+ bool needsparams = false;
+ bool is_opaque_closure = false;
+ jl_method_instance_t *mi = jl_get_ci_mi(codeinst);
+ std::string gf_thunk_name = get_function_name(specsig, needsparams, name_from_method_instance(mi), params.TargetTriple);
+ gf_thunk_name += "_gfthunk";
+ if (target_specsig) {
+ jl_value_t *abi = get_ci_abi(codeinst);
+ jl_returninfo_t targetspec = get_specsig_function(params, M, target, "", abi, codeinst->rettype, is_opaque_closure);
+ if (specsig)
+ emit_specsig_to_specsig(M, gf_thunk_name, sigt, declrt, is_opaque_closure, nargs, params,
+ target, mi->specTypes, codeinst->rettype, &targetspec, nullptr);
+ else
+ gen_invoke_wrapper(mi, abi, codeinst->rettype, declrt, targetspec, nargs, -1, is_opaque_closure, gf_thunk_name, M, params);
+ }
+ else {
+ if (specsig)
+ emit_specsig_to_specsig(M, gf_thunk_name, sigt, declrt, is_opaque_closure, nargs, params,
+ target, mi->specTypes, codeinst->rettype, nullptr, nullptr);
+ else
+ emit_fptr1_wrapper(M, gf_thunk_name, target, nullptr, declrt, codeinst->rettype, params);
+ }
+ return gf_thunk_name;
+}
+
+std::string emit_abi_dispatcher(Module *M, jl_codegen_params_t ¶ms, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, bool specsig, jl_code_instance_t *codeinst, Value *invoke)
+{
+ // this builds a method that calls a method with the same arguments but a different specsig
+ // build a specsig -> args1 (apply_generic) or invoke (emit_tojlinvoke) call
+ // build a args1 -> args1 call (emit_fptr1_wrapper)
+ // build a args1 -> invoke call (emit_tojlinvoke)
+ bool is_opaque_closure = false;
+ Value *target;
+ if (!codeinst)
+ target = prepare_call_in(M, jlapplygeneric_func);
+ else
+ target = emit_tojlinvoke(codeinst, invoke, M, params); // TODO: inline this call?
+ std::string gf_thunk_name;
+ if (codeinst)
+ raw_string_ostream(gf_thunk_name) << "jfptr_" << name_from_method_instance(jl_get_ci_mi(codeinst)) << "_";
+ else
+ raw_string_ostream(gf_thunk_name) << "j_";
+ raw_string_ostream(gf_thunk_name) << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1) << "_gfthunk";
+ if (specsig)
+ emit_specsig_to_specsig(M, gf_thunk_name, sigt, declrt, is_opaque_closure, nargs, params,
+ target, sigt, codeinst ? codeinst->rettype : (jl_value_t*)jl_any_type, nullptr, nullptr);
+ else
+ emit_fptr1_wrapper(M, gf_thunk_name, target, nullptr, declrt, codeinst ? codeinst->rettype : (jl_value_t*)jl_any_type, params);
+ return gf_thunk_name;
+}
+
+std::string emit_abi_constreturn(Module *M, jl_codegen_params_t ¶ms, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, bool specsig, jl_value_t *rettype_const)
+{
+ bool is_opaque_closure = false;
+ std::string gf_thunk_name;
+ raw_string_ostream(gf_thunk_name) << "jconst_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1);
+ if (specsig) {
+ emit_specsig_to_specsig(M, gf_thunk_name, sigt, declrt, is_opaque_closure, nargs, params,
+ nullptr, sigt, jl_typeof(rettype_const), nullptr, rettype_const);
+ }
+ else {
+ emit_fptr1_wrapper(M, gf_thunk_name, nullptr, rettype_const, declrt, jl_typeof(rettype_const), params);
+ }
+ return gf_thunk_name;
+}
+
+std::string emit_abi_constreturn(Module *M, jl_codegen_params_t ¶ms, bool specsig, jl_code_instance_t *codeinst)
+{
+ jl_value_t *abi = get_ci_abi(codeinst);
+ return emit_abi_constreturn(M, params, codeinst->rettype, abi, specsig ? jl_nparams(abi) : 0, specsig, codeinst->rettype_const);
+}
+
+// release jl_world_counter
+// store theFptr
+// release last_world_v
+//
+// acquire last_world_v
+// read theFptr
+// acquire jl_world_counter
+// if (last_world_v != jl_world_counter)
+// fptr = compute_new_fptr(&last_world_v)
+// return fptr()
+static jl_cgval_t emit_abi_call(jl_codectx_t &ctx, jl_value_t *declrt, jl_value_t *sigt, ArrayRef inputargs, size_t nargs, Value *world_age_field)
+{
+ jl_cgval_t retval;
+ if (sigt) {
+ jl_temporary_root(ctx, declrt);
+ jl_temporary_root(ctx, sigt);
+ assert(nargs == jl_nparams(sigt));
+ bool needsparams = false;
+ bool is_opaque_closure = false;
+ bool specsig = uses_specsig(sigt, needsparams, declrt, ctx.params->prefer_specsig);
+ PointerType *T_ptr = ctx.types().T_ptr;
+ Type *T_size = ctx.types().T_size;
+ Constant *Vnull = ConstantPointerNull::get(T_ptr);
+ Module *M = jl_Module;
+ GlobalVariable *theFptr = new GlobalVariable(*M, T_ptr, false,
+ GlobalVariable::PrivateLinkage,
+ Vnull);
+ GlobalVariable *last_world_p = new GlobalVariable(*M, T_size, false,
+ GlobalVariable::PrivateLinkage,
+ ConstantInt::get(T_size, 0));
+ ArrayType *T_cfuncdata = ArrayType::get(T_ptr, 6);
+ size_t flags = specsig;
+ GlobalVariable *cfuncdata = new GlobalVariable(*M, T_cfuncdata, false,
+ GlobalVariable::PrivateLinkage,
+ ConstantArray::get(T_cfuncdata, {
+ Vnull,
+ Vnull,
+ Vnull,
+ literal_pointer_val_slot(ctx.emission_context, M, declrt),
+ literal_pointer_val_slot(ctx.emission_context, M, sigt),
+ literal_static_pointer_val((void*)flags, T_ptr)}));
+ LoadInst *last_world_v = ctx.builder.CreateAlignedLoad(T_size, last_world_p, ctx.types().alignof_ptr);
+ last_world_v->setOrdering(AtomicOrdering::Acquire);
+ LoadInst *callee = ctx.builder.CreateAlignedLoad(T_ptr, theFptr, ctx.types().alignof_ptr);
+ callee->setOrdering(AtomicOrdering::Monotonic);
+ LoadInst *world_v = ctx.builder.CreateAlignedLoad(ctx.types().T_size,
+ prepare_global_in(M, jlgetworld_global), ctx.types().alignof_ptr);
+ world_v->setOrdering(AtomicOrdering::Acquire);
+ ctx.builder.CreateStore(world_v, world_age_field);
+ Value *age_not_ok = ctx.builder.CreateICmpNE(last_world_v, world_v);
+ Value *target = emit_guarded_test(ctx, age_not_ok, callee, [&] {
+ Function *getcaller = prepare_call(jlgetabiconverter_func);
+ CallInst *cw = ctx.builder.CreateCall(getcaller, {
+ get_current_task(ctx),
+ theFptr,
+ last_world_p,
+ cfuncdata});
+ cw->setAttributes(getcaller->getAttributes());
+ return cw;
+ });
+ ctx.emission_context.cfuncs.push_back({declrt, sigt, nargs, specsig, theFptr, cfuncdata});
+ if (specsig) {
+ // TODO: could we force this to guarantee passing a box for `f` here (since we
+ // know we had it here) and on the receiver end (emit_abi_converter /
+ // emit_abi_dispatcher), force it to know that it can simply use this pointer
+ // instead of re-boxing it if it needs to the boxed copy of it. This comes up
+ // very rarely since usually the ABI calls are concrete and match exactly and
+ // aren't closures, but sometimes there are cases like that because of
+ // `::Function` de-specialization heuristics, such as for the `Returns` callable
+ // given that it is `@nospecialize`.
+ jl_returninfo_t targetspec = get_specsig_function(ctx.emission_context, M, target, "", sigt, declrt, is_opaque_closure);
+ retval = emit_call_specfun_other(ctx, is_opaque_closure, sigt, declrt, targetspec, inputargs, nargs);
+ }
+ else {
+ retval = mark_julia_type(ctx, emit_jlcall(ctx, target, nullptr, inputargs, nargs, julia_call), true, declrt);
+ }
+ }
+ else {
+ // emit a dispatch
+ Value *ret = emit_jlcall(ctx, jlapplygeneric_func, NULL, inputargs, nargs, julia_call);
+ retval = mark_julia_type(ctx, ret, true, jl_any_type);
+ // inline a call to typeassert here
+ emit_typecheck(ctx, retval, declrt, "cfunction");
+ retval = update_julia_type(ctx, retval, declrt);
+ }
+ return retval;
+}
+
static Function *gen_cfun_wrapper(
 Module *into, jl_codegen_params_t &params,
const function_sig_t &sig, jl_value_t *ff, const char *aliasname,
- jl_value_t *declrt, jl_method_instance_t *lam,
+ jl_value_t *declrt, jl_value_t *sigt,
jl_unionall_t *unionall_env, jl_svec_t *sparam_vals, jl_array_t **closure_types)
{
++GeneratedCFuncWrappers;
// Generate a c-callable wrapper
assert(into);
size_t nargs = sig.nccallargs;
- const char *name = "cfunction";
- size_t world = jl_atomic_load_acquire(&jl_world_counter);
+ const char *name = aliasname ? aliasname : "cfunction";
bool nest = (!ff || unionall_env);
- jl_value_t *astrt = (jl_value_t*)jl_any_type;
- if (aliasname)
- name = aliasname;
- else if (lam)
- name = jl_symbol_name(lam->def.method->name);
-
- jl_code_instance_t *codeinst = NULL;
- if (lam) {
- // TODO: this isn't ideal to be unconditionally calling type inference from here
- codeinst = jl_type_infer(lam, world, SOURCE_MODE_NOT_REQUIRED);
- if (codeinst)
- astrt = codeinst->rettype;
- if (astrt != (jl_value_t*)jl_bottom_type &&
- jl_type_intersection(astrt, declrt) == jl_bottom_type) {
- // Do not warn if the function never returns since it is
- // occasionally required by the C API (typically error callbacks)
- // even though we're likely to encounter memory errors in that case
- jl_printf(JL_STDERR, "WARNING: cfunction: return type of %s does not match\n", name);
- }
- }
std::string funcName;
raw_string_ostream(funcName) << "jlcapi_" << name << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1);
@@ -7538,19 +7291,6 @@ static Function *gen_cfun_wrapper(
jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe);
ctx.world_age_at_entry = ai.decorateInst(
ctx.builder.CreateAlignedLoad(ctx.types().T_size, world_age_field, ctx.types().alignof_ptr));
- Value *world_v = ctx.builder.CreateAlignedLoad(ctx.types().T_size,
- prepare_global_in(jl_Module, jlgetworld_global), ctx.types().alignof_ptr);
- cast<LoadInst>(world_v)->setOrdering(AtomicOrdering::Acquire);
-
- Value *age_ok = nullptr;
- if (codeinst) {
- LoadInst *lam_max = ctx.builder.CreateAlignedLoad(
- ctx.types().T_size,
- emit_ptrgep(ctx, literal_pointer_val(ctx, (jl_value_t*)codeinst), offsetof(jl_code_instance_t, max_world)),
- ctx.types().alignof_ptr);
- age_ok = ctx.builder.CreateICmpUGE(lam_max, world_v);
- }
- ctx.builder.CreateStore(world_v, world_age_field);
// first emit code to record the arguments
Function::arg_iterator AI = cw->arg_begin();
@@ -7615,8 +7355,8 @@ static Function *gen_cfun_wrapper(
inputarg = mark_julia_type(ctx, val, false, jargty);
}
}
- else if (static_at || (!jl_is_typevar(jargty) && !jl_is_immutable_datatype(jargty))) {
- // must be a jl_value_t* (because it's mutable or contains gc roots)
+ else if (static_at || (!jl_is_typevar(jargty) && (!jl_is_datatype(jargty) || jl_is_abstracttype(jargty) || jl_is_mutable_datatype(jargty)))) {
+ // must be a jl_value_t* (because it is mutable or abstract)
inputarg = mark_julia_type(ctx, maybe_decay_untracked(ctx, val), true, jargty_proper);
}
else {
@@ -7630,31 +7370,36 @@ static Function *gen_cfun_wrapper(
emit_ptrgep(ctx, nestPtr, jl_array_nrows(*closure_types) * ctx.types().sizeof_ptr),
Align(sizeof(void*)));
BasicBlock *boxedBB = BasicBlock::Create(ctx.builder.getContext(), "isboxed", cw);
- BasicBlock *loadBB = BasicBlock::Create(ctx.builder.getContext(), "need-load", cw);
+ BasicBlock *notanyBB = BasicBlock::Create(ctx.builder.getContext(), "not-any", cw);
BasicBlock *unboxedBB = BasicBlock::Create(ctx.builder.getContext(), "maybe-unboxed", cw);
BasicBlock *isanyBB = BasicBlock::Create(ctx.builder.getContext(), "any", cw);
BasicBlock *afterBB = BasicBlock::Create(ctx.builder.getContext(), "after", cw);
- Value *isrtboxed = ctx.builder.CreateIsNull(val); // XXX: this is the wrong condition and should be inspecting runtime_dt instead
- ctx.builder.CreateCondBr(isrtboxed, boxedBB, loadBB);
- ctx.builder.SetInsertPoint(boxedBB);
- Value *p1 = val;
- p1 = track_pjlvalue(ctx, p1);
- ctx.builder.CreateBr(afterBB);
- ctx.builder.SetInsertPoint(loadBB);
Value *isrtany = ctx.builder.CreateICmpEQ(
- literal_pointer_val(ctx, (jl_value_t*)jl_any_type), val);
- ctx.builder.CreateCondBr(isrtany, isanyBB, unboxedBB);
+ track_pjlvalue(ctx,literal_pointer_val(ctx, (jl_value_t*)jl_any_type)), runtime_dt);
+ ctx.builder.CreateCondBr(isrtany, isanyBB, notanyBB);
ctx.builder.SetInsertPoint(isanyBB);
- Value *p2 = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, val, Align(sizeof(void*)));
+ Value *p1 = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, val, Align(sizeof(void*)));
ctx.builder.CreateBr(afterBB);
+ isanyBB = ctx.builder.GetInsertBlock(); // could have changed
+ ctx.builder.SetInsertPoint(notanyBB);
+ jl_cgval_t runtime_dt_val = mark_julia_type(ctx, runtime_dt, true, jl_any_type);
+ Value *isrtboxed = // (!jl_is_datatype(runtime_dt) || !jl_is_concrete_datatype(runtime_dt) || jl_is_mutable_datatype(runtime_dt))
+ emit_guarded_test(ctx, emit_exactly_isa(ctx, runtime_dt_val, jl_datatype_type), true, [&] {
+ return ctx.builder.CreateOr(ctx.builder.CreateNot(emit_isconcrete(ctx, runtime_dt)), emit_datatype_mutabl(ctx, runtime_dt));
+ });
+ ctx.builder.CreateCondBr(isrtboxed, boxedBB, unboxedBB);
+ ctx.builder.SetInsertPoint(boxedBB);
+ Value *p2 = track_pjlvalue(ctx, val);
+ ctx.builder.CreateBr(afterBB);
+ boxedBB = ctx.builder.GetInsertBlock(); // could have changed
ctx.builder.SetInsertPoint(unboxedBB);
Value *p3 = emit_new_bits(ctx, runtime_dt, val);
unboxedBB = ctx.builder.GetInsertBlock(); // could have changed
ctx.builder.CreateBr(afterBB);
ctx.builder.SetInsertPoint(afterBB);
PHINode *p = ctx.builder.CreatePHI(ctx.types().T_prjlvalue, 3);
- p->addIncoming(p1, boxedBB);
- p->addIncoming(p2, isanyBB);
+ p->addIncoming(p1, isanyBB);
+ p->addIncoming(p2, boxedBB);
p->addIncoming(p3, unboxedBB);
inputarg = mark_julia_type(ctx, p, true, jargty_proper);
}
@@ -7705,30 +7450,8 @@ static Function *gen_cfun_wrapper(
assert(AI == cw->arg_end());
// Create the call
- bool jlfunc_sret;
- jl_cgval_t retval;
- if (codeinst) {
- retval = emit_invoke(ctx, mark_julia_const(ctx, (jl_value_t*)codeinst), inputargs, nargs + 1, astrt, age_ok);
- jlfunc_sret = retval.V && isa<AllocaInst>(retval.V) && !retval.TIndex && retval.inline_roots.empty();
- if (jlfunc_sret && sig.sret) {
- // fuse the two sret together
- assert(retval.ispointer());
- AllocaInst *result = cast<AllocaInst>(retval.V);
- retval.V = sretPtr;
- result->replaceAllUsesWith(sretPtr);
- result->eraseFromParent();
- }
- }
- else {
- // emit a dispatch
- jlfunc_sret = false;
- Value *ret = emit_jlcall(ctx, jlapplygeneric_func, NULL, inputargs, nargs + 1, julia_call);
- retval = mark_julia_type(ctx, ret, true, astrt);
- }
-
- // inline a call to typeassert here, if required
- emit_typecheck(ctx, retval, declrt, "cfunction");
- retval = update_julia_type(ctx, retval, declrt);
+ jl_cgval_t retval = emit_abi_call(ctx, declrt, sigt, inputargs, nargs + 1, world_age_field);
+ bool jlfunc_sret = retval.V && isa<AllocaInst>(retval.V) && !retval.TIndex && retval.inline_roots.empty();
// Prepare the return value
Value *r;
@@ -7738,7 +7461,12 @@ static Function *gen_cfun_wrapper(
r = boxed(ctx, retval);
}
else if (sig.sret && jlfunc_sret) {
- // nothing to do
+ // fuse the two sret together
+ assert(retval.ispointer());
+ AllocaInst *result = cast<AllocaInst>(retval.V);
+ retval.V = sretPtr;
+ result->replaceAllUsesWith(sretPtr);
+ result->eraseFromParent();
r = NULL;
}
else if (!type_is_ghost(sig.lrt)) {
@@ -7761,14 +7489,6 @@ static Function *gen_cfun_wrapper(
ctx.builder.SetCurrentDebugLocation(noDbg);
ctx.builder.ClearInsertionPoint();
- if (aliasname) {
- auto alias = GlobalAlias::create(cw->getValueType(), cw->getType()->getAddressSpace(),
- GlobalValue::ExternalLinkage, aliasname, cw, M);
- if(ctx.emission_context.TargetTriple.isOSBinFormatCOFF()) {
- alias->setDLLStorageClass(GlobalValue::DLLStorageClassTypes::DLLExportStorageClass);
- }
- }
-
if (nest) {
funcName += "make";
Function *cw_make = Function::Create(
@@ -7797,6 +7517,26 @@ static Function *gen_cfun_wrapper(
return cw;
}
+static const char *derive_sigt_name(jl_value_t *jargty)
+{
+ jl_datatype_t *dt = (jl_datatype_t*)jl_argument_datatype(jargty);
+ if ((jl_value_t*)dt == jl_nothing)
+ return NULL;
+ jl_sym_t *name = dt->name->name;
+ // if we have a kwcall, use that as the name anyways
+ jl_methtable_t *mt = dt->name->mt;
+ if (mt == jl_type_type_mt || mt == jl_nonfunction_mt || mt == NULL) {
+ // our value for `name` from MethodTable is not good, try to come up with something better
+ if (jl_is_type_type((jl_value_t*)dt)) {
+ dt = (jl_datatype_t*)jl_argument_datatype(jl_tparam0(dt));
+ if ((jl_value_t*)dt != jl_nothing) {
+ name = dt->name->name;
+ }
+ }
+ }
+ return jl_symbol_name(name);
+}
+
// Get the LLVM Function* for the C-callable entry point for a certain function
// and argument types.
// here argt does not include the leading function type argument
@@ -7847,7 +7587,7 @@ static jl_cgval_t emit_cfunction(jl_codectx_t &ctx, jl_value_t *output_type, con
if (rt != declrt && rt != (jl_value_t*)jl_any_type)
jl_temporary_root(ctx, rt);
- function_sig_t sig("cfunction", lrt, rt, retboxed, argt, unionall_env, false, CallingConv::C, false, &ctx.emission_context);
+ function_sig_t sig("cfunction", lrt, rt, retboxed, false, argt, unionall_env, false, CallingConv::C, false, &ctx.emission_context);
assert(sig.fargt.size() + sig.sret == sig.fargt_sig.size());
if (!sig.err_msg.empty()) {
emit_error(ctx, sig.err_msg);
@@ -7896,13 +7636,11 @@ static jl_cgval_t emit_cfunction(jl_codectx_t &ctx, jl_value_t *output_type, con
return jl_cgval_t();
}
}
- size_t world = jl_atomic_load_acquire(&jl_world_counter);
- // try to look up this function for direct invoking
- jl_method_instance_t *lam = sigt ? jl_get_specialization1((jl_tupletype_t*)sigt, world, 0) : NULL;
+ const char *name = derive_sigt_name(fexpr_rt.typ);
Value *F = gen_cfun_wrapper(
jl_Module, ctx.emission_context,
- sig, fexpr_rt.constant, NULL,
- declrt, lam,
+ sig, fexpr_rt.constant, name,
+ declrt, sigt,
unionall_env, sparam_vals, &closure_types);
bool outboxed;
if (nest) {
@@ -7963,13 +7701,14 @@ static jl_cgval_t emit_cfunction(jl_codectx_t &ctx, jl_value_t *output_type, con
// do codegen to create a C-callable alias/wrapper, or if sysimg_handle is set,
// restore one from a loaded system image.
-const char *jl_generate_ccallable(Module *llvmmod, void *sysimg_handle, jl_value_t *declrt, jl_value_t *sigt, jl_codegen_params_t ¶ms)
+const char *jl_generate_ccallable(Module *llvmmod, jl_value_t *nameval, jl_value_t *declrt, jl_value_t *sigt, jl_codegen_params_t &params)
{
++GeneratedCCallables;
jl_datatype_t *ft = (jl_datatype_t*)jl_tparam0(sigt);
+ assert(jl_is_datatype(ft));
jl_value_t *ff = ft->instance;
assert(ff);
- const char *name = jl_symbol_name(ft->name->mt->name);
+ const char *name = !jl_is_string(nameval) ? jl_symbol_name(ft->name->mt->name) : jl_string_data(nameval);
jl_value_t *crt = declrt;
if (jl_is_abstract_ref_type(declrt)) {
declrt = jl_tparam0(declrt);
@@ -7988,25 +7727,15 @@ const char *jl_generate_ccallable(Module *llvmmod, void *sysimg_handle, jl_value
}
jl_value_t *err;
{ // scope block for sig
- function_sig_t sig("cfunction", lcrt, crt, toboxed,
+ function_sig_t sig("cfunction", lcrt, crt, toboxed, false,
argtypes, NULL, false, CallingConv::C, false, ¶ms);
if (sig.err_msg.empty()) {
- size_t world = jl_atomic_load_acquire(&jl_world_counter);
- if (sysimg_handle) {
- // restore a ccallable from the system image
- void *addr;
- int found = jl_dlsym(sysimg_handle, name, &addr, 0);
- if (found)
- add_named_global(name, addr);
- else {
- err = jl_get_exceptionf(jl_errorexception_type, "%s not found in sysimg", name);
- jl_throw(err);
- }
- }
- else {
- jl_method_instance_t *lam = jl_get_specialization1((jl_tupletype_t*)sigt, world, 0);
- //Safe b/c params holds context lock
- gen_cfun_wrapper(llvmmod, params, sig, ff, name, declrt, lam, NULL, NULL, NULL);
+ //Safe b/c params holds context lock
+ Function *cw = gen_cfun_wrapper(llvmmod, params, sig, ff, name, declrt, sigt, NULL, NULL, NULL);
+ auto alias = GlobalAlias::create(cw->getValueType(), cw->getType()->getAddressSpace(),
+ GlobalValue::ExternalLinkage, name, cw, llvmmod);
+ if (params.TargetTriple.isOSBinFormatCOFF()) {
+ alias->setDLLStorageClass(GlobalValue::DLLStorageClassTypes::DLLExportStorageClass);
}
JL_GC_POP();
return name;
@@ -8018,7 +7747,7 @@ const char *jl_generate_ccallable(Module *llvmmod, void *sysimg_handle, jl_value
// generate a julia-callable function that calls f (AKA lam)
// if is_opaque_closure, then generate the OC invoke, rather than a real invoke
-static void gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *abi, jl_value_t *jlretty, jl_returninfo_t &f, unsigned nargs, int retarg, bool is_opaque_closure, StringRef funcName,
+static void gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *abi, jl_value_t *jlretty, jl_value_t *declrt, jl_returninfo_t &f, unsigned nargs, int retarg, bool is_opaque_closure, StringRef funcName,
 Module *M, jl_codegen_params_t &params)
{
++GeneratedInvokeWrappers;
@@ -8069,6 +7798,10 @@ static void gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *abi, jl_va
argv[i] = mark_julia_type(ctx, theArg, true, ty);
}
jl_cgval_t retval = emit_call_specfun_other(ctx, is_opaque_closure, abi, jlretty, f, argv, nargs);
+ if (declrt != jlretty) {
+ emit_typecheck(ctx, retval, declrt, "cfunction");
+ retval = update_julia_type(ctx, retval, declrt);
+ }
if (retarg != -1) {
Value *theArg;
if (retarg == 0)
@@ -8085,20 +7818,22 @@ static void gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *abi, jl_va
ctx.builder.CreateRet(boxed(ctx, retval));
}
-static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value *fval, StringRef name, jl_value_t *sig, jl_value_t *jlrettype, bool is_opaque_closure, bool gcstack_arg,
+static jl_returninfo_t get_specsig_function(jl_codegen_params_t &params, Module *M, Value *fval, StringRef name, jl_value_t *sig, jl_value_t *jlrettype, bool is_opaque_closure,
ArrayRef ArgNames, unsigned nreq)
{
+ bool gcstack_arg = params.params->gcstack_arg;
jl_returninfo_t props = {};
SmallVector fsig;
SmallVector argnames;
Type *rt = NULL;
Type *srt = NULL;
+ Type *T_prjlvalue = PointerType::get(M->getContext(), AddressSpace::Tracked);
if (jlrettype == (jl_value_t*)jl_bottom_type) {
- rt = getVoidTy(ctx.builder.getContext());
+ rt = getVoidTy(M->getContext());
props.cc = jl_returninfo_t::Register;
}
else if (jl_is_structtype(jlrettype) && jl_is_datatype_singleton((jl_datatype_t*)jlrettype)) {
- rt = getVoidTy(ctx.builder.getContext());
+ rt = getVoidTy(M->getContext());
props.cc = jl_returninfo_t::Register;
}
else if (jl_is_uniontype(jlrettype)) {
@@ -8106,25 +7841,25 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value
union_alloca_type((jl_uniontype_t*)jlrettype, allunbox, props.union_bytes, props.union_align, props.union_minalign);
if (props.union_bytes) {
props.cc = jl_returninfo_t::Union;
- Type *AT = ArrayType::get(getInt8Ty(ctx.builder.getContext()), props.union_bytes);
+ Type *AT = ArrayType::get(getInt8Ty(M->getContext()), props.union_bytes);
fsig.push_back(AT->getPointerTo());
argnames.push_back("union_bytes_return");
- Type *pair[] = { ctx.types().T_prjlvalue, getInt8Ty(ctx.builder.getContext()) };
- rt = StructType::get(ctx.builder.getContext(), ArrayRef(pair));
+ Type *pair[] = { T_prjlvalue, getInt8Ty(M->getContext()) };
+ rt = StructType::get(M->getContext(), ArrayRef(pair));
}
else if (allunbox) {
props.cc = jl_returninfo_t::Ghosts;
- rt = getInt8Ty(ctx.builder.getContext());
+ rt = getInt8Ty(M->getContext());
}
else {
- rt = ctx.types().T_prjlvalue;
+ rt = T_prjlvalue;
}
}
else if (!deserves_retbox(jlrettype)) {
bool retboxed;
- rt = julia_type_to_llvm(ctx, jlrettype, &retboxed);
+ rt = _julia_type_to_llvm(&params, M->getContext(), jlrettype, &retboxed);
assert(!retboxed);
- if (rt != getVoidTy(ctx.builder.getContext()) && deserves_sret(jlrettype, rt)) {
+ if (rt != getVoidTy(M->getContext()) && deserves_sret(jlrettype, rt)) {
auto tracked = CountTrackedPointers(rt, true);
assert(!tracked.derived);
if (tracked.count && !tracked.all) {
@@ -8139,53 +7874,53 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value
fsig.push_back(rt->getPointerTo(M->getDataLayout().getAllocaAddrSpace()));
argnames.push_back("sret_return");
srt = rt;
- rt = getVoidTy(ctx.builder.getContext());
+ rt = getVoidTy(M->getContext());
}
else {
props.cc = jl_returninfo_t::Register;
}
}
else {
- rt = ctx.types().T_prjlvalue;
+ rt = T_prjlvalue;
}
SmallVector attrs; // function declaration attributes
if (props.cc == jl_returninfo_t::SRet) {
assert(srt);
- AttrBuilder param(ctx.builder.getContext());
+ AttrBuilder param(M->getContext());
param.addStructRetAttr(srt);
param.addAttribute(Attribute::NoAlias);
param.addAttribute(Attribute::NoCapture);
param.addAttribute(Attribute::NoUndef);
- attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param));
+ attrs.push_back(AttributeSet::get(M->getContext(), param));
assert(fsig.size() == 1);
}
if (props.cc == jl_returninfo_t::Union) {
- AttrBuilder param(ctx.builder.getContext());
+ AttrBuilder param(M->getContext());
param.addAttribute(Attribute::NoAlias);
param.addAttribute(Attribute::NoCapture);
param.addAttribute(Attribute::NoUndef);
- attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param));
+ attrs.push_back(AttributeSet::get(M->getContext(), param));
assert(fsig.size() == 1);
}
if (props.return_roots) {
- AttrBuilder param(ctx.builder.getContext());
+ AttrBuilder param(M->getContext());
param.addAttribute(Attribute::NoAlias);
param.addAttribute(Attribute::NoCapture);
param.addAttribute(Attribute::NoUndef);
- attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param));
- fsig.push_back(ctx.types().T_ptr);
+ attrs.push_back(AttributeSet::get(M->getContext(), param));
+ fsig.push_back(getPointerTy(M->getContext()));
argnames.push_back("return_roots");
}
- if (gcstack_arg){
- AttrBuilder param(ctx.builder.getContext());
- if (ctx.emission_context.use_swiftcc)
+ if (gcstack_arg) {
+ AttrBuilder param(M->getContext());
+ if (params.use_swiftcc)
param.addAttribute(Attribute::SwiftSelf);
param.addAttribute(Attribute::NonNull);
- attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param));
- fsig.push_back(PointerType::get(JuliaType::get_ppjlvalue_ty(ctx.builder.getContext()), 0));
+ attrs.push_back(AttributeSet::get(M->getContext(), param));
+ fsig.push_back(PointerType::get(M->getContext(), 0));
argnames.push_back("pgcstack_arg");
}
@@ -8198,18 +7933,18 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value
if (is_uniquerep_Type(jt))
continue;
isboxed = deserves_argbox(jt);
- et = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, jt);
+ et = isboxed ? T_prjlvalue : _julia_type_to_llvm(&params, M->getContext(), jt, nullptr);
if (type_is_ghost(et))
continue;
}
- AttrBuilder param(ctx.builder.getContext());
+ AttrBuilder param(M->getContext());
Type *ty = et;
if (et == nullptr || et->isAggregateType()) { // aggregate types are passed by pointer
param.addAttribute(Attribute::NoCapture);
param.addAttribute(Attribute::ReadOnly);
- ty = ctx.builder.getPtrTy(AddressSpace::Derived);
+ ty = PointerType::get(M->getContext(), AddressSpace::Derived);
}
- else if (isboxed && jl_is_immutable_datatype(jt)) {
+ else if (isboxed && jl_may_be_immutable_datatype(jt) && !jl_is_abstracttype(jt)) {
param.addAttribute(Attribute::ReadOnly);
}
else if (jl_is_primitivetype(jt) && ty->isIntegerTy()) {
@@ -8217,7 +7952,7 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value
Attribute::AttrKind attr = issigned ? Attribute::SExt : Attribute::ZExt;
param.addAttribute(attr);
}
- attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param));
+ attrs.push_back(AttributeSet::get(M->getContext(), param));
fsig.push_back(ty);
size_t argno = i < nreq ? i : nreq;
std::string genname;
@@ -8233,8 +7968,8 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value
if (et && et->isAggregateType()) {
auto tracked = CountTrackedPointers(et);
if (tracked.count && !tracked.all) {
- attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param));
- fsig.push_back(ctx.builder.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
+ attrs.push_back(AttributeSet::get(M->getContext(), param));
+ fsig.push_back(PointerType::get(M->getContext(), M->getDataLayout().getAllocaAddrSpace()));
if (!genname.empty())
argnames.push_back((Twine(".roots.") + genname).str());
}
@@ -8244,25 +7979,26 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value
AttributeSet FnAttrs;
AttributeSet RetAttrs;
if (jlrettype == (jl_value_t*)jl_bottom_type)
- FnAttrs = FnAttrs.addAttribute(ctx.builder.getContext(), Attribute::NoReturn);
- else if (rt == ctx.types().T_prjlvalue)
- RetAttrs = RetAttrs.addAttribute(ctx.builder.getContext(), Attribute::NonNull);
- AttributeList attributes = AttributeList::get(ctx.builder.getContext(), FnAttrs, RetAttrs, attrs);
+ FnAttrs = FnAttrs.addAttribute(M->getContext(), Attribute::NoReturn);
+ else if (rt == T_prjlvalue)
+ RetAttrs = RetAttrs.addAttribute(M->getContext(), Attribute::NonNull);
+ AttributeList attributes = AttributeList::get(M->getContext(), FnAttrs, RetAttrs, attrs);
FunctionType *ftype = FunctionType::get(rt, fsig, false);
if (fval == NULL) {
 Function *f = M ? cast_or_null<Function>(M->getNamedValue(name)) : NULL;
if (f == NULL) {
f = Function::Create(ftype, GlobalVariable::ExternalLinkage, name, M);
- jl_init_function(f, ctx.emission_context.TargetTriple);
- if (ctx.emission_context.params->debug_info_level >= 2) {
+ jl_init_function(f, params.TargetTriple);
+ if (params.params->debug_info_level >= 2) {
ios_t sigbuf;
ios_mem(&sigbuf, 0);
jl_static_show_func_sig((JL_STREAM*) &sigbuf, sig);
- f->setAttributes(AttributeList::get(f->getContext(), {attributes.addFnAttribute(ctx.builder.getContext(),"julia.fsig", StringRef(sigbuf.buf, sigbuf.size)), f->getAttributes()}));
+ f->setAttributes(AttributeList::get(f->getContext(), {attributes.addFnAttribute(M->getContext(),"julia.fsig", StringRef(sigbuf.buf, sigbuf.size)), f->getAttributes()}));
ios_close(&sigbuf);
- } else
+ } else {
f->setAttributes(AttributeList::get(f->getContext(), {attributes, f->getAttributes()}));
+ }
}
else {
assert(f->getFunctionType() == ftype);
@@ -8270,11 +8006,10 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value
fval = f;
}
else {
- if (fval->getType()->isIntegerTy())
- fval = emit_inttoptr(ctx, fval, ftype->getPointerTo());
+ assert(fval->getType()->isPointerTy());
}
 if (auto F = dyn_cast<Function>(fval)) {
- if (gcstack_arg && ctx.emission_context.use_swiftcc)
+ if (gcstack_arg && params.use_swiftcc)
F->setCallingConv(CallingConv::Swift);
assert(F->arg_size() >= argnames.size());
for (size_t i = 0; i < argnames.size(); i++) {
@@ -8321,25 +8056,6 @@ static jl_datatype_t *compute_va_type(jl_value_t *sig, size_t nreq)
return (jl_datatype_t*)typ;
}
-static std::string get_function_name(bool specsig, bool needsparams, const char *unadorned_name, const Triple &TargetTriple)
-{
- std::string _funcName;
- raw_string_ostream funcName(_funcName);
- // try to avoid conflicts in the global symbol table
- if (specsig)
- funcName << "julia_"; // api 5
- else if (needsparams)
- funcName << "japi3_";
- else
- funcName << "japi1_";
- if (TargetTriple.isOSLinux()) {
- if (unadorned_name[0] == '@')
- unadorned_name++;
- }
- funcName << unadorned_name << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1);
- return funcName.str();
-}
-
// Compile to LLVM IR, using a specialized signature if applicable.
static jl_llvm_functions_t
emit_function(
@@ -8555,8 +8271,8 @@ static jl_llvm_functions_t
ArgNames[i] = name;
}
}
- returninfo = get_specsig_function(ctx, M, NULL, declarations.specFunctionObject, abi,
- jlrettype, ctx.is_opaque_closure, JL_FEAT_TEST(ctx,gcstack_arg),
+ returninfo = get_specsig_function(params, M, NULL, declarations.specFunctionObject, abi,
+ jlrettype, ctx.is_opaque_closure,
ArgNames, nreq);
 f = cast<Function>(returninfo.decl.getCallee());
has_sret = (returninfo.cc == jl_returninfo_t::SRet || returninfo.cc == jl_returninfo_t::Union);
@@ -8590,7 +8306,7 @@ static jl_llvm_functions_t
raw_string_ostream(wrapName) << "jfptr_" << ctx.name << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1);
declarations.functionObject = wrapName;
size_t nparams = jl_nparams(abi);
- gen_invoke_wrapper(lam, abi, jlrettype, returninfo, nparams, retarg, ctx.is_opaque_closure, declarations.functionObject, M, ctx.emission_context);
+ gen_invoke_wrapper(lam, abi, jlrettype, jlrettype, returninfo, nparams, retarg, ctx.is_opaque_closure, declarations.functionObject, M, ctx.emission_context);
// TODO: add attributes: maybe_mark_argument_dereferenceable(Arg, argType)
// TODO: add attributes: dereferenceable
// TODO: (if needsparams) add attributes: dereferenceable, readonly, nocapture
@@ -8988,11 +8704,12 @@ static jl_llvm_functions_t
++AI; // both specsig (derived) and fptr1 (box) pass this argument as a distinct argument
// Load closure world
Value *worldaddr = emit_ptrgep(ctx, oc_this, offsetof(jl_opaque_closure_t, world));
+ Align alignof_ptr(ctx.types().alignof_ptr);
jl_cgval_t closure_world = typed_load(ctx, worldaddr, NULL, (jl_value_t*)jl_long_type,
- nullptr, nullptr, false, AtomicOrdering::NotAtomic, false, ctx.types().alignof_ptr.value());
+ nullptr, nullptr, false, AtomicOrdering::NotAtomic, false, alignof_ptr.value());
assert(ctx.world_age_at_entry == nullptr);
ctx.world_age_at_entry = closure_world.V; // The tls world in a OC is the world of the closure
- emit_unbox_store(ctx, closure_world, world_age_field, ctx.tbaa().tbaa_gcframe, ctx.types().alignof_ptr);
+ emit_unbox_store(ctx, closure_world, world_age_field, ctx.tbaa().tbaa_gcframe, alignof_ptr, alignof_ptr);
if (s == jl_unused_sym || vi.value.constant)
continue;
@@ -9750,7 +9467,7 @@ static jl_llvm_functions_t
if (tracked)
split_value_into(ctx, typedval, align, dest, align, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), mayberoots);
else
- emit_unbox_store(ctx, typedval, dest, ctx.tbaa().tbaa_stack, align);
+ emit_unbox_store(ctx, typedval, dest, ctx.tbaa().tbaa_stack, align, align);
}
return mayberoots;
});
@@ -9785,8 +9502,10 @@ static jl_llvm_functions_t
else {
if (VN)
V = Constant::getNullValue(ctx.types().T_prjlvalue);
- if (dest)
- emit_unbox_store(ctx, val, dest, ctx.tbaa().tbaa_stack, Align(julia_alignment(val.typ)));
+ if (dest) {
+ Align align(julia_alignment(val.typ));
+ emit_unbox_store(ctx, val, dest, ctx.tbaa().tbaa_stack, align, align);
+ }
RTindex = ConstantInt::get(getInt8Ty(ctx.builder.getContext()), tindex);
}
}
@@ -9958,6 +9677,39 @@ static jl_llvm_functions_t
// --- entry point ---
+jl_llvm_functions_t jl_emit_codedecls(
+ orc::ThreadSafeModule &M,
+ jl_code_instance_t *codeinst,
+ jl_codegen_params_t &params)
+{
+ jl_llvm_functions_t decls = {};
+ jl_method_instance_t *mi = jl_get_ci_mi(codeinst);
+ bool specsig, needsparams;
+ std::tie(specsig, needsparams) = uses_specsig(get_ci_abi(codeinst), mi, codeinst->rettype, params.params->prefer_specsig);
+ const char *name = name_from_method_instance(mi);
+ if (specsig)
+ raw_string_ostream(decls.functionObject) << "jfptr_" << name << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1);
+ else if (needsparams)
+ decls.functionObject = "jl_fptr_sparam";
+ else
+ decls.functionObject = "jl_fptr_args";
+ raw_string_ostream(decls.specFunctionObject) << (specsig ? "j_" : "j1_") << name << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1);
+ M.withModuleDo([&](Module &M) {
+ bool is_opaque_closure = jl_is_method(mi->def.value) && mi->def.method->is_for_opaque_closure;
+ if (specsig) {
+ get_specsig_function(params, &M, nullptr, decls.specFunctionObject, get_ci_abi(codeinst), codeinst->rettype, is_opaque_closure);
+ }
+ else {
+ Function *f = Function::Create(needsparams ? JuliaType::get_jlfuncparams_ty(M.getContext()) : JuliaType::get_jlfunc_ty(M.getContext()),
+ GlobalVariable::ExternalLinkage,
+ decls.specFunctionObject, M);
+ jl_init_function(f, params.TargetTriple);
+ f->setAttributes(AttributeList::get(M.getContext(), {get_func_attrs(M.getContext()), f->getAttributes()}));
+ }
+ });
+ return decls;
+}
+
jl_llvm_functions_t jl_emit_code(
orc::ThreadSafeModule &m,
jl_method_instance_t *li,
@@ -10012,7 +9764,7 @@ static jl_llvm_functions_t jl_emit_oc_wrapper(orc::ThreadSafeModule &m, jl_codeg
jl_codectx_t ctx(M->getContext(), params, 0, 0);
ctx.name = M->getModuleIdentifier().data();
std::string funcName = get_function_name(true, false, ctx.name, ctx.emission_context.TargetTriple);
- jl_returninfo_t returninfo = get_specsig_function(ctx, M, NULL, funcName, mi->specTypes, rettype, true, JL_FEAT_TEST(ctx,gcstack_arg));
+ jl_returninfo_t returninfo = get_specsig_function(params, M, NULL, funcName, mi->specTypes, rettype, true);
 Function *gf_thunk = cast<Function>(returninfo.decl.getCallee());
jl_init_function(gf_thunk, ctx.emission_context.TargetTriple);
size_t nrealargs = jl_nparams(mi->specTypes);
@@ -10032,8 +9784,8 @@ jl_llvm_functions_t jl_emit_codeinst(
{
JL_TIMING(CODEGEN, CODEGEN_Codeinst);
jl_timing_show_method_instance(jl_get_ci_mi(codeinst), JL_TIMING_DEFAULT_BLOCK);
+ jl_method_instance_t *mi = jl_get_ci_mi(codeinst);
if (!src) {
- jl_method_instance_t *mi = jl_get_ci_mi(codeinst);
// Assert that this this is the generic method for opaque closure wrappers:
// this signals to instead compile specptr such that it holds the specptr -> invoke wrapper
// to satisfy the dispatching implementation requirements of jl_f_opaque_closure_call
@@ -10044,7 +9796,7 @@ jl_llvm_functions_t jl_emit_codeinst(
return jl_llvm_functions_t(); // user error
}
//assert(jl_egal((jl_value_t*)jl_atomic_load_relaxed(&codeinst->debuginfo), (jl_value_t*)src->debuginfo) && "trying to generate code for a codeinst for an incompatible src");
- jl_llvm_functions_t decls = jl_emit_code(m, jl_get_ci_mi(codeinst), src, get_ci_abi(codeinst), codeinst->rettype, params);
+ jl_llvm_functions_t decls = jl_emit_code(m, mi, src, get_ci_abi(codeinst), codeinst->rettype, params);
return decls;
}
@@ -10102,7 +9854,6 @@ static void init_jit_functions(void)
add_named_global(memcmp_func, &memcmp);
add_named_global(jltypeerror_func, &jl_type_error);
add_named_global(jlcheckassign_func, &jl_checked_assignment);
- add_named_global(jlgetbindingorerror_func, &jl_get_binding_or_error);
add_named_global(jlcheckbpwritable_func, &jl_check_binding_currently_writable);
add_named_global(jlboundp_func, &jl_boundp);
for (auto it : builtin_func_map())
@@ -10152,6 +9903,7 @@ static void init_jit_functions(void)
add_named_global(jlunlockvalue_func, &jl_unlock_value);
add_named_global(jllockfield_func, &jl_lock_field);
add_named_global(jlunlockfield_func, &jl_unlock_field);
+ add_named_global(jlgetabiconverter_func, &jl_get_abi_converter);
#ifdef _OS_WINDOWS_
#if defined(_CPU_X86_64_)
diff --git a/src/datatype.c b/src/datatype.c
index fd25cca503676..3f9679ec54618 100644
--- a/src/datatype.c
+++ b/src/datatype.c
@@ -80,6 +80,7 @@ JL_DLLEXPORT jl_typename_t *jl_new_typename_in(jl_sym_t *name, jl_module_t *modu
tn->partial = NULL;
tn->atomicfields = NULL;
tn->constfields = NULL;
+ jl_atomic_store_relaxed(&tn->cache_entry_count, 0);
tn->max_methods = 0;
tn->constprop_heustic = 0;
return tn;
@@ -300,9 +301,10 @@ static jl_datatype_layout_t *jl_get_layout(uint32_t sz,
}
// Determine if homogeneous tuple with fields of type t will have
-// a special alignment beyond normal Julia rules.
+// a special alignment and vector-ABI beyond normal rules for aggregates.
// Return special alignment if one exists, 0 if normal alignment rules hold.
 // A non-zero result *must* match the LLVM rules for a vector type <nfields x t>.
+// Matching the compiler's `__attribute__ vector_size` behavior.
// For sake of Ahead-Of-Time (AOT) compilation, this routine has to work
// without LLVM being available.
unsigned jl_special_vector_alignment(size_t nfields, jl_value_t *t)
@@ -317,8 +319,12 @@ unsigned jl_special_vector_alignment(size_t nfields, jl_value_t *t)
// motivating use case comes up for Julia, we reject pointers.
return 0;
size_t elsz = jl_datatype_size(ty);
- if (elsz != 1 && elsz != 2 && elsz != 4 && elsz != 8)
- // Only handle power-of-two-sized elements (for now)
+ if (next_power_of_two(elsz) != elsz)
+ // Only handle power-of-two-sized elements (for now), since other
+ // lengths may be packed into very complicated arrangements (llvm pads
+ // extra bits on most platforms when computing alignment but not when
+ // computing type size, but adds no extra bytes for each element, so
+ // their effect on offsets are never what you may naturally expect).
return 0;
size_t size = nfields * elsz;
// Use natural alignment for this vector: this matches LLVM and clang.
@@ -495,7 +501,13 @@ void jl_get_genericmemory_layout(jl_datatype_t *st)
jl_value_t *kind = jl_tparam0(st);
jl_value_t *eltype = jl_tparam1(st);
jl_value_t *addrspace = jl_tparam2(st);
- if (!jl_is_typevar(eltype) && !jl_is_type(eltype)) {
+ if (!st->isconcretetype) {
+ // Since parent dt has an opaque layout, we may end up here being asked to copy that layout to subtypes,
+ // but we don't actually want to do that unless this object is constructable (or at least has a layout).
+ // The real layout is stored only on the wrapper.
+ return;
+ }
+ if (!jl_is_type(eltype)) {
// this is expected to have a layout, but since it is not constructable, we don't care too much what it is
static const jl_datatype_layout_t opaque_ptr_layout = {0, 0, 1, -1, sizeof(void*), {0}};
st->layout = &opaque_ptr_layout;
@@ -723,9 +735,9 @@ void jl_compute_field_offsets(jl_datatype_t *st)
}
else {
fsz = sizeof(void*);
- if (fsz > MAX_ALIGN)
- fsz = MAX_ALIGN;
al = fsz;
+ if (al > MAX_ALIGN)
+ al = MAX_ALIGN;
desc[i].isptr = 1;
zeroinit = 1;
npointers++;
@@ -769,8 +781,6 @@ void jl_compute_field_offsets(jl_datatype_t *st)
if (al > alignm)
alignm = al;
}
- if (alignm > MAX_ALIGN)
- alignm = MAX_ALIGN; // We cannot guarantee alignments over 16 bytes because that's what our heap is aligned as
if (LLT_ALIGN(sz, alignm) > sz) {
haspadding = 1;
sz = LLT_ALIGN(sz, alignm);
@@ -939,6 +949,14 @@ JL_DLLEXPORT jl_datatype_t *jl_new_primitivetype(jl_value_t *name, jl_module_t *
uint32_t nbytes = (nbits + 7) / 8;
uint32_t alignm = next_power_of_two(nbytes);
# if defined(_CPU_X86_) && !defined(_OS_WINDOWS_)
+ // datalayout strings are often weird: on 64-bit they usually follow fairly simple rules,
+ // but on x86 32 bit platforms, sometimes 5 to 8 byte types are
+ // 32-bit aligned even though the MAX_ALIGN (for types 9+ bytes) is 16
+ // (except for f80 which is align 4 on Mingw, Linux, and BSDs--but align 16 on MSVC and Darwin)
+ // https://llvm.org/doxygen/ARMTargetMachine_8cpp.html#adb29b487708f0dc2a940345b68649270
+ // https://llvm.org/doxygen/AArch64TargetMachine_8cpp.html#a003a58caf135efbf7273c5ed84e700d7
+ // https://llvm.org/doxygen/X86TargetMachine_8cpp.html#aefdbcd6131ef195da070cef7fdaf0532
+ // 32-bit alignment is weird
if (alignm == 8)
alignm = 4;
# endif
diff --git a/src/dlload.c b/src/dlload.c
index 91980cc4ecbbf..2c7ee08229394 100644
--- a/src/dlload.c
+++ b/src/dlload.c
@@ -240,21 +240,32 @@ JL_DLLEXPORT int jl_dlclose(void *handle) JL_NOTSAFEPOINT
#endif
}
-void *jl_find_dynamic_library_by_addr(void *symbol) {
+void *jl_find_dynamic_library_by_addr(void *symbol, int throw_err) {
void *handle;
#ifdef _OS_WINDOWS_
if (!GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
(LPCWSTR)symbol,
(HMODULE*)&handle)) {
- jl_error("could not load base module");
+ if (throw_err)
+ jl_error("could not load base module");
+ return NULL;
}
#else
Dl_info info;
if (!dladdr(symbol, &info) || !info.dli_fname) {
- jl_error("could not load base module");
+ if (throw_err)
+ jl_error("could not load base module");
+ return NULL;
}
handle = dlopen(info.dli_fname, RTLD_NOW | RTLD_NOLOAD | RTLD_LOCAL);
- dlclose(handle); // Undo ref count increment from `dlopen`
+#if !defined(__APPLE__)
+ if (handle == RTLD_DEFAULT && (RTLD_DEFAULT != NULL || dlerror() == NULL)) {
+ // We loaded the executable but got RTLD_DEFAULT back, ask for a real handle instead
+ handle = dlopen("", RTLD_NOW | RTLD_NOLOAD | RTLD_LOCAL);
+ }
+#endif
+ if (handle != NULL)
+ dlclose(handle); // Undo ref count increment from `dlopen`
#endif
return handle;
}
@@ -277,7 +288,7 @@ JL_DLLEXPORT void *jl_load_dynamic_library(const char *modname, unsigned flags,
// modname == NULL is a sentinel value requesting the handle of libjulia-internal
if (modname == NULL)
- return jl_find_dynamic_library_by_addr(&jl_load_dynamic_library);
+ return jl_find_dynamic_library_by_addr(&jl_load_dynamic_library, throw_err);
abspath = jl_isabspath(modname);
is_atpath = 0;
@@ -307,9 +318,9 @@ JL_DLLEXPORT void *jl_load_dynamic_library(const char *modname, unsigned flags,
While these exist as OS concepts on Darwin, we want to use them on other platforms
such as Windows, so we emulate them here.
*/
- if (!abspath && !is_atpath && jl_base_module != NULL) {
+ if (!abspath && !is_atpath && jl_base_module != NULL && jl_typeinf_world != 1) {
jl_binding_t *b = jl_get_module_binding(jl_base_module, jl_symbol("DL_LOAD_PATH"), 0);
- jl_array_t *DL_LOAD_PATH = (jl_array_t*)(b ? jl_get_binding_value(b) : NULL);
+ jl_array_t *DL_LOAD_PATH = (jl_array_t*)(b ? jl_get_binding_value_in_world(b, jl_typeinf_world) : NULL);
if (DL_LOAD_PATH != NULL) {
size_t j;
for (j = 0; j < jl_array_nrows(DL_LOAD_PATH); j++) {
diff --git a/src/flisp/Makefile b/src/flisp/Makefile
index 17292d301115b..eca1de86e588a 100644
--- a/src/flisp/Makefile
+++ b/src/flisp/Makefile
@@ -111,10 +111,10 @@ $(BUILDDIR)/$(EXENAME)$(EXE): $(OBJS) $(LIBFILES_release) $(BUILDDIR)/$(LIBTARGE
$(BUILDDIR)/host/Makefile:
mkdir -p $(BUILDDIR)/host
@# add Makefiles to the build directories for convenience (pointing back to the source location of each)
- @echo '# -- This file is automatically generated in julia/src/flisp/Makefile -- #' > $@
- @echo 'BUILDDIR=$(BUILDDIR)/host' >> $@
- @echo 'BUILDING_HOST_TOOLS=1' >> $@
- @echo 'include $(SRCDIR)/Makefile' >> $@
+ @printf "%s\n" '# -- This file is automatically generated in julia/src/flisp/Makefile -- #' > $@
+ @printf "%s\n" 'BUILDDIR=$(BUILDDIR)/host' >> $@
+ @printf "%s\n" 'BUILDING_HOST_TOOLS=1' >> $@
+ @printf "%s\n" 'include $(SRCDIR)/Makefile' >> $@
$(BUILDDIR)/host/$(EXENAME): $(BUILDDIR)/host/Makefile | ${BUILDDIR}/host/flisp.boot
make -C $(BUILDDIR)/host $(EXENAME)
diff --git a/src/gc-pages.c b/src/gc-pages.c
index 71d59de29166f..79dd8993a8861 100644
--- a/src/gc-pages.c
+++ b/src/gc-pages.c
@@ -28,6 +28,28 @@ JL_DLLEXPORT uint64_t jl_get_pg_size(void)
static int block_pg_cnt = DEFAULT_BLOCK_PG_ALLOC;
+// Julia allocates large blocks (64M) with mmap. These are never
+// unmapped but the underlying physical memory may be released
+// with calls to madvise(MADV_DONTNEED).
+static uint64_t poolmem_blocks_allocated_total = 0;
+
+JL_DLLEXPORT uint64_t jl_poolmem_blocks_allocated_total(void)
+{
+ return poolmem_blocks_allocated_total;
+}
+
+JL_DLLEXPORT uint64_t jl_poolmem_bytes_allocated(void)
+{
+ return jl_atomic_load_relaxed(&gc_heap_stats.bytes_resident);
+}
+
+JL_DLLEXPORT uint64_t jl_current_pg_count(void)
+{
+ assert(jl_page_size == GC_PAGE_SZ && "RAI fork of Julia should be running on platforms for which jl_page_size == GC_PAGE_SZ");
+ size_t nb = jl_atomic_load_relaxed(&gc_heap_stats.bytes_resident);
+ return nb / GC_PAGE_SZ; // exact division
+}
+
void jl_gc_init_page(void)
{
if (GC_PAGE_SZ * block_pg_cnt < jl_page_size)
@@ -55,6 +77,11 @@ char *jl_gc_try_alloc_pages_(int pg_cnt) JL_NOTSAFEPOINT
MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mem == MAP_FAILED)
return NULL;
+
+#ifdef MADV_NOHUGEPAGE
+ madvise(mem, pages_sz, MADV_NOHUGEPAGE);
+#endif
+
#endif
if (GC_PAGE_SZ > jl_page_size)
// round data pointer up to the nearest gc_page_data-aligned
@@ -62,6 +89,7 @@ char *jl_gc_try_alloc_pages_(int pg_cnt) JL_NOTSAFEPOINT
mem = (char*)gc_page_data(mem + GC_PAGE_SZ - 1);
jl_atomic_fetch_add_relaxed(&gc_heap_stats.bytes_mapped, pages_sz);
jl_atomic_fetch_add_relaxed(&gc_heap_stats.bytes_resident, pages_sz);
+ poolmem_blocks_allocated_total++; // RAI-specific
return mem;
}
@@ -184,7 +212,7 @@ void jl_gc_free_page(jl_gc_pagemeta_t *pg) JL_NOTSAFEPOINT
}
#ifdef _OS_WINDOWS_
VirtualFree(p, decommit_size, MEM_DECOMMIT);
-#elif defined(MADV_FREE)
+#elif 0
static int supports_madv_free = 1;
if (supports_madv_free) {
if (madvise(p, decommit_size, MADV_FREE) == -1) {
diff --git a/src/gc-stacks.c b/src/gc-stacks.c
index 9387c7fb065ec..55c4a470dd33e 100644
--- a/src/gc-stacks.c
+++ b/src/gc-stacks.c
@@ -72,8 +72,10 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
munmap(stk, bufsz);
return MAP_FAILED;
}
-# endif
-
+#ifdef MADV_NOHUGEPAGE
+ madvise(stk, bufsz, MADV_NOHUGEPAGE);
+#endif
+#endif
jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
return stk;
}
diff --git a/src/gc-stock.c b/src/gc-stock.c
index 8118b3c5629ae..090cc12063d50 100644
--- a/src/gc-stock.c
+++ b/src/gc-stock.c
@@ -948,6 +948,7 @@ static void gc_sweep_page(gc_page_profiler_serializer_t *s, jl_gc_pool_t *p, jl_
done:
if (re_use_page) {
+ gc_update_page_fragmentation_data(pg);
push_lf_back(allocd, pg);
}
else {
@@ -956,7 +957,6 @@ static void gc_sweep_page(gc_page_profiler_serializer_t *s, jl_gc_pool_t *p, jl_
push_lf_back(&global_page_pool_lazily_freed, pg);
}
gc_page_profile_write_to_file(s);
- gc_update_page_fragmentation_data(pg);
gc_time_count_page(freedall, pg_skpd);
jl_ptls_t ptls = jl_current_task->ptls;
// Note that we aggregate the `pool_live_bytes` over all threads before returning this
@@ -2144,6 +2144,12 @@ STATIC_INLINE void gc_mark_module_binding(jl_ptls_t ptls, jl_module_t *mb_parent
gc_heap_snapshot_record_module_to_binding(mb_parent, bindings, bindingkeyset);
gc_assert_parent_validity((jl_value_t *)mb_parent, (jl_value_t *)mb_parent->parent);
gc_try_claim_and_push(mq, (jl_value_t *)mb_parent->parent, &nptr);
+ gc_assert_parent_validity((jl_value_t *)mb_parent, (jl_value_t *)mb_parent->usings_backedges);
+ gc_try_claim_and_push(mq, (jl_value_t *)mb_parent->usings_backedges, &nptr);
+ gc_heap_snapshot_record_binding_partition_edge((jl_value_t*)mb_parent, mb_parent->usings_backedges);
+ gc_assert_parent_validity((jl_value_t *)mb_parent, (jl_value_t *)mb_parent->scanned_methods);
+ gc_try_claim_and_push(mq, (jl_value_t *)mb_parent->scanned_methods, &nptr);
+ gc_heap_snapshot_record_binding_partition_edge((jl_value_t*)mb_parent, mb_parent->scanned_methods);
size_t nusings = module_usings_length(mb_parent);
if (nusings > 0) {
// this is only necessary because bindings for "using" modules
@@ -2445,16 +2451,6 @@ FORCE_INLINE void gc_mark_outrefs(jl_ptls_t ptls, jl_gc_markqueue_t *mq, void *_
if (npointers == 0)
return;
uintptr_t nptr = (npointers << 2 | (bits & GC_OLD));
- if (vt == jl_binding_partition_type) {
- // BindingPartition has a special union of jl_value_t and flag bits
- // but is otherwise regular.
- jl_binding_partition_t *bpart = (jl_binding_partition_t*)jl_valueof(o);
- jl_value_t *val = decode_restriction_value(
- jl_atomic_load_relaxed(&bpart->restriction));
- if (val)
- gc_heap_snapshot_record_binding_partition_edge((jl_value_t*)bpart, val);
- gc_try_claim_and_push(mq, val, &nptr);
- }
assert((layout->nfields > 0 || layout->flags.fielddesc_type == 3) &&
"opaque types should have been handled specially");
if (layout->flags.fielddesc_type == 0) {
@@ -3399,6 +3395,9 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection)
return recollect;
}
+extern int jl_heartbeat_pause(void);
+extern int jl_heartbeat_resume(void);
+
JL_DLLEXPORT void jl_gc_collect(jl_gc_collection_t collection)
{
JL_PROBE_GC_BEGIN(collection);
@@ -3441,6 +3440,7 @@ JL_DLLEXPORT void jl_gc_collect(jl_gc_collection_t collection)
// existence of the thread in the jl_n_threads count.
//
// TODO: concurrently queue objects
+ jl_heartbeat_pause();
jl_fence();
gc_n_threads = jl_atomic_load_acquire(&jl_n_threads);
gc_all_tls_states = jl_atomic_load_relaxed(&jl_all_tls_states);
@@ -3472,6 +3472,7 @@ JL_DLLEXPORT void jl_gc_collect(jl_gc_collection_t collection)
gc_n_threads = 0;
gc_all_tls_states = NULL;
+ jl_heartbeat_resume();
jl_safepoint_end_gc();
jl_gc_state_set(ptls, old_state, JL_GC_STATE_WAITING);
JL_PROBE_GC_END();
@@ -3908,6 +3909,9 @@ void *jl_gc_perm_alloc_nolock(size_t sz, int zero, unsigned align, unsigned offs
errno = last_errno;
if (__unlikely(pool == MAP_FAILED))
return NULL;
+#ifdef MADV_NOHUGEPAGE
+ madvise(pool, GC_PERM_POOL_SIZE, MADV_NOHUGEPAGE);
+#endif
#endif
gc_perm_pool = (uintptr_t)pool;
gc_perm_end = gc_perm_pool + GC_PERM_POOL_SIZE;
diff --git a/src/gc-stock.h b/src/gc-stock.h
index d478ee1366da0..41e6151605d80 100644
--- a/src/gc-stock.h
+++ b/src/gc-stock.h
@@ -499,6 +499,9 @@ extern uv_sem_t gc_sweep_assists_needed;
extern _Atomic(int) gc_n_threads_marking;
extern _Atomic(int) gc_n_threads_sweeping_pools;
extern _Atomic(int) n_threads_running;
+extern _Atomic(int) gc_n_threads_sweeping_stacks;
+extern _Atomic(int) gc_ptls_sweep_idx;
+extern _Atomic(int) gc_stack_free_idx;
extern uv_barrier_t thread_init_done;
void gc_mark_queue_all_roots(jl_ptls_t ptls, jl_gc_markqueue_t *mq);
void gc_mark_finlist_(jl_gc_markqueue_t *mq, jl_value_t *fl_parent, jl_value_t **fl_begin, jl_value_t **fl_end) JL_NOTSAFEPOINT;
diff --git a/src/gf.c b/src/gf.c
index 710dda208f0b2..f1deffc954b41 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -294,7 +294,7 @@ jl_datatype_t *jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_a
if (dt == NULL) {
// Builtins are specially considered available from world 0
jl_value_t *f = jl_new_generic_function_with_supertype(sname, jl_core_module, jl_builtin_type, 0);
- jl_set_const(jl_core_module, sname, f);
+ jl_set_initial_const(jl_core_module, sname, f, 0);
dt = (jl_datatype_t*)jl_typeof(f);
}
@@ -304,7 +304,7 @@ jl_datatype_t *jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_a
m->isva = 1;
m->nargs = 2;
jl_atomic_store_relaxed(&m->primary_world, 1);
- jl_atomic_store_relaxed(&m->deleted_world, ~(size_t)0);
+ jl_atomic_store_relaxed(&m->dispatch_status, METHOD_SIG_LATEST_WHICH | METHOD_SIG_LATEST_ONLY);
m->sig = (jl_value_t*)jl_anytuple_type;
m->slot_syms = jl_an_empty_string;
m->nospecialize = 0;
@@ -315,7 +315,7 @@ jl_datatype_t *jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_a
JL_GC_PUSH2(&m, &newentry);
newentry = jl_typemap_alloc(jl_anytuple_type, NULL, jl_emptysvec,
- (jl_value_t*)m, jl_atomic_load_relaxed(&m->primary_world), jl_atomic_load_relaxed(&m->deleted_world));
+ (jl_value_t*)m, 1, ~(size_t)0);
jl_typemap_insert(&mt->defs, (jl_value_t*)mt, newentry, jl_cachearg_offset(mt));
jl_method_instance_t *mi = jl_get_specialized(m, (jl_value_t*)jl_anytuple_type, jl_emptysvec);
@@ -532,7 +532,10 @@ JL_DLLEXPORT jl_value_t *jl_call_in_typeinf_world(jl_value_t **args, int nargs)
jl_task_t *ct = jl_current_task;
size_t last_age = ct->world_age;
ct->world_age = jl_typeinf_world;
+ int last_pure = ct->ptls->in_pure_callback;
+ ct->ptls->in_pure_callback = 0;
jl_value_t *ret = jl_apply(args, nargs);
+ ct->ptls->in_pure_callback = last_pure;
ct->world_age = last_age;
return ret;
}
@@ -582,8 +585,8 @@ JL_DLLEXPORT int jl_mi_cache_has_ci(jl_method_instance_t *mi,
return 0;
}
-// look for something with an egal ABI and properties that is already in the JIT (compiled=true) or simply in the cache (compiled=false)
-JL_DLLEXPORT jl_code_instance_t *jl_get_ci_equiv(jl_code_instance_t *ci JL_PROPAGATES_ROOT, int compiled) JL_NOTSAFEPOINT
+// look for something with an egal ABI and properties that is already in the JIT for a whole edge (target_world=0) or can be added to the JIT with new source just for target_world.
+JL_DLLEXPORT jl_code_instance_t *jl_get_ci_equiv(jl_code_instance_t *ci JL_PROPAGATES_ROOT, size_t target_world) JL_NOTSAFEPOINT
{
jl_value_t *def = ci->def;
jl_method_instance_t *mi = jl_get_ci_mi(ci);
@@ -595,9 +598,9 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_ci_equiv(jl_code_instance_t *ci JL_PROPA
while (codeinst) {
if (codeinst != ci &&
jl_atomic_load_relaxed(&codeinst->inferred) != NULL &&
- (!compiled || jl_atomic_load_relaxed(&codeinst->invoke) != NULL) &&
- jl_atomic_load_relaxed(&codeinst->min_world) <= min_world &&
- jl_atomic_load_relaxed(&codeinst->max_world) >= max_world &&
+ (target_world ? 1 : jl_atomic_load_relaxed(&codeinst->invoke) != NULL) &&
+ jl_atomic_load_relaxed(&codeinst->min_world) <= (target_world ? target_world : min_world) &&
+ jl_atomic_load_relaxed(&codeinst->max_world) >= (target_world ? target_world : max_world) &&
jl_egal(codeinst->def, def) &&
jl_egal(codeinst->owner, owner) &&
jl_egal(codeinst->rettype, rettype)) {
@@ -605,7 +608,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_ci_equiv(jl_code_instance_t *ci JL_PROPA
}
codeinst = jl_atomic_load_relaxed(&codeinst->next);
}
- return (jl_code_instance_t*)jl_nothing;
+ return ci;
}
@@ -640,6 +643,9 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst(
assert(const_flags & 2);
jl_atomic_store_relaxed(&codeinst->invoke, jl_fptr_const_return);
}
+ codeinst->time_infer_total = 0;
+ codeinst->time_infer_self = 0;
+ jl_atomic_store_relaxed(&codeinst->time_compile, 0);
jl_atomic_store_relaxed(&codeinst->specsigflags, 0);
jl_atomic_store_relaxed(&codeinst->precompile, 0);
jl_atomic_store_relaxed(&codeinst->next, NULL);
@@ -652,12 +658,16 @@ JL_DLLEXPORT void jl_update_codeinst(
jl_code_instance_t *codeinst, jl_value_t *inferred,
int32_t const_flags, size_t min_world, size_t max_world,
uint32_t effects, jl_value_t *analysis_results,
+ double time_infer_total, double time_infer_cache_saved, double time_infer_self,
jl_debuginfo_t *di, jl_svec_t *edges /* , int absolute_max*/)
{
assert(min_world <= max_world && "attempting to set invalid world constraints");
//assert((!jl_is_method(codeinst->def->def.value) || max_world != ~(size_t)0 || min_world <= 1 || jl_svec_len(edges) != 0) && "missing edges");
codeinst->analysis_results = analysis_results;
jl_gc_wb(codeinst, analysis_results);
+ codeinst->time_infer_total = julia_double_to_half(time_infer_total);
+ codeinst->time_infer_cache_saved = julia_double_to_half(time_infer_cache_saved);
+ codeinst->time_infer_self = julia_double_to_half(time_infer_self);
jl_atomic_store_relaxed(&codeinst->ipo_purity_bits, effects);
jl_atomic_store_relaxed(&codeinst->debuginfo, di);
jl_gc_wb(codeinst, di);
@@ -759,33 +769,6 @@ JL_DLLEXPORT int jl_mi_try_insert(jl_method_instance_t *mi JL_ROOTING_ARGUMENT,
return ret;
}
-static int get_method_unspec_list(jl_typemap_entry_t *def, void *closure)
-{
- size_t world = jl_atomic_load_acquire(&jl_world_counter);
- jl_value_t *specializations = jl_atomic_load_relaxed(&def->func.method->specializations);
- if (specializations == (jl_value_t*)jl_emptysvec)
- return 1;
- if (!jl_is_svec(specializations)) {
- jl_method_instance_t *mi = (jl_method_instance_t*)specializations;
- assert(jl_is_method_instance(mi));
- if (jl_rettype_inferred_native(mi, world, world) == jl_nothing)
- jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)mi);
- return 1;
- }
- size_t i, l = jl_svec_len(specializations);
- JL_GC_PUSH1(&specializations);
- for (i = 0; i < l; i++) {
- jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i);
- if ((jl_value_t*)mi != jl_nothing) {
- assert(jl_is_method_instance(mi));
- if (jl_rettype_inferred_native(mi, world, world) == jl_nothing)
- jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)mi);
- }
- }
- JL_GC_POP();
- return 1;
-}
-
int foreach_mtable_in_module(
jl_module_t *m,
int (*visit)(jl_methtable_t *mt, void *env),
@@ -865,42 +848,14 @@ int jl_foreach_reachable_mtable(int (*visit)(jl_methtable_t *mt, void *env), voi
return 1;
}
-static int reset_mt_caches(jl_methtable_t *mt, void *env)
-{
- // removes all method caches
- // this might not be entirely safe (GC or MT), thus we only do it very early in bootstrapping
- if (!mt->frozen) { // make sure not to reset builtin functions
- jl_atomic_store_release(&mt->leafcache, (jl_genericmemory_t*)jl_an_empty_memory_any);
- jl_atomic_store_release(&mt->cache, jl_nothing);
- }
- jl_typemap_visitor(jl_atomic_load_relaxed(&mt->defs), get_method_unspec_list, env);
- return 1;
-}
-
jl_function_t *jl_typeinf_func JL_GLOBALLY_ROOTED = NULL;
JL_DLLEXPORT size_t jl_typeinf_world = 1;
JL_DLLEXPORT void jl_set_typeinf_func(jl_value_t *f)
{
- size_t newfunc = jl_typeinf_world == 1 && jl_typeinf_func == NULL;
jl_typeinf_func = (jl_function_t*)f;
jl_typeinf_world = jl_get_tls_world_age();
- int world = jl_atomic_fetch_add(&jl_world_counter, 1) + 1; // make type-inference the only thing in this world
- if (newfunc) {
- // give type inference a chance to see all of these
- // TODO: also reinfer if max_world != ~(size_t)0
- jl_array_t *unspec = jl_alloc_vec_any(0);
- JL_GC_PUSH1(&unspec);
- jl_foreach_reachable_mtable(reset_mt_caches, (void*)unspec);
- size_t i, l;
- for (i = 0, l = jl_array_nrows(unspec); i < l; i++) {
- jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(unspec, i);
- if (jl_rettype_inferred_native(mi, world, world) == jl_nothing)
- jl_type_infer(mi, world, SOURCE_MODE_NOT_REQUIRED);
- }
- JL_GC_POP();
- }
}
static int very_general_type(jl_value_t *t)
@@ -1663,6 +1618,15 @@ jl_method_instance_t *cache_method(
}
else {
jl_typemap_insert(cache, parent, newentry, offs);
+ if (mt) {
+ jl_datatype_t *dt = jl_nth_argument_datatype((jl_value_t*)tt, 1);
+ if (dt) {
+ jl_typename_t *tn = dt->name;
+ int cache_entry_count = jl_atomic_load_relaxed(&tn->cache_entry_count);
+ if (cache_entry_count < 31)
+ jl_atomic_store_relaxed(&tn->cache_entry_count, cache_entry_count + 1);
+ }
+ }
}
JL_GC_POP();
@@ -1727,7 +1691,6 @@ static int get_intersect_visitor(jl_typemap_entry_t *oldentry, struct typemap_in
assert(jl_atomic_load_relaxed(&oldentry->min_world) <= jl_atomic_load_relaxed(&closure->newentry->min_world) && "old method cannot be newer than new method");
assert(jl_atomic_load_relaxed(&oldentry->max_world) != jl_atomic_load_relaxed(&closure->newentry->min_world) && "method cannot be added at the same time as method deleted");
// don't need to consider other similar methods if this oldentry will always fully intersect with them and dominates all of them
- typemap_slurp_search(oldentry, &closure->match);
jl_method_t *oldmethod = oldentry->func.method;
if (closure->match.issubty // e.g. jl_subtype(closure->newentry.sig, oldentry->sig)
&& jl_subtype(oldmethod->sig, (jl_value_t*)closure->newentry->sig)) { // e.g. jl_type_equal(closure->newentry->sig, oldentry->sig)
@@ -1736,7 +1699,18 @@ static int get_intersect_visitor(jl_typemap_entry_t *oldentry, struct typemap_in
}
if (closure->shadowed == NULL)
closure->shadowed = (jl_value_t*)jl_alloc_vec_any(0);
+ if (closure->match.issubty) { // this should be rarely true (in fact, get_intersect_visitor should be rarely true), but might as well skip the rest of the scan fast anyways since we can
+ int only = jl_atomic_load_relaxed(&oldmethod->dispatch_status) & METHOD_SIG_LATEST_ONLY;
+ if (only) {
+ size_t len = jl_array_nrows(closure->shadowed);
+ if (len > 0)
+ jl_array_del_end((jl_array_t*)closure->shadowed, len);
+ jl_array_ptr_1d_push((jl_array_t*)closure->shadowed, (jl_value_t*)oldmethod);
+ return 0;
+ }
+ }
jl_array_ptr_1d_push((jl_array_t*)closure->shadowed, (jl_value_t*)oldmethod);
+ typemap_slurp_search(oldentry, &closure->match);
return 1;
}
@@ -1839,7 +1813,7 @@ JL_DLLEXPORT jl_value_t *jl_debug_method_invalidation(int state)
return jl_nothing;
}
-static void _invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_world, int depth);
+static void _invalidate_backedges(jl_method_instance_t *replaced_mi, jl_code_instance_t *replaced_ci, size_t max_world, int depth);
// recursively invalidate cached methods that had an edge to a replaced method
static void invalidate_code_instance(jl_code_instance_t *replaced, size_t max_world, int depth)
@@ -1858,13 +1832,15 @@ static void invalidate_code_instance(jl_code_instance_t *replaced, size_t max_wo
if (!jl_is_method(replaced_mi->def.method))
return; // shouldn't happen, but better to be safe
JL_LOCK(&replaced_mi->def.method->writelock);
- if (jl_atomic_load_relaxed(&replaced->max_world) == ~(size_t)0) {
+ size_t replacedmaxworld = jl_atomic_load_relaxed(&replaced->max_world);
+ if (replacedmaxworld == ~(size_t)0) {
assert(jl_atomic_load_relaxed(&replaced->min_world) - 1 <= max_world && "attempting to set illogical world constraints (probable race condition)");
jl_atomic_store_release(&replaced->max_world, max_world);
+ // recurse to all backedges to update their valid range also
+ _invalidate_backedges(replaced_mi, replaced, max_world, depth + 1);
+ } else {
+ assert(jl_atomic_load_relaxed(&replaced->max_world) <= max_world);
}
- assert(jl_atomic_load_relaxed(&replaced->max_world) <= max_world);
- // recurse to all backedges to update their valid range also
- _invalidate_backedges(replaced_mi, max_world, depth + 1);
JL_UNLOCK(&replaced_mi->def.method->writelock);
}
@@ -1873,28 +1849,141 @@ JL_DLLEXPORT void jl_invalidate_code_instance(jl_code_instance_t *replaced, size
invalidate_code_instance(replaced, max_world, 1);
}
-static void _invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_world, int depth) {
- jl_array_t *backedges = replaced_mi->backedges;
- if (backedges) {
- // invalidate callers (if any)
+static void _invalidate_backedges(jl_method_instance_t *replaced_mi, jl_code_instance_t *replaced_ci, size_t max_world, int depth) {
+ uint8_t recursion_flags = 0;
+ jl_array_t *backedges = jl_mi_get_backedges_mutate(replaced_mi, &recursion_flags);
+ if (!backedges)
+ return;
+ // invalidate callers (if any)
+ if (!replaced_ci) {
+ // We know all backedges are deleted - clear them eagerly
+ // Clears both array and flags
replaced_mi->backedges = NULL;
- JL_GC_PUSH1(&backedges);
- size_t i = 0, l = jl_array_nrows(backedges);
- jl_code_instance_t *replaced;
- while (i < l) {
- i = get_next_edge(backedges, i, NULL, &replaced);
- JL_GC_PROMISE_ROOTED(replaced); // propagated by get_next_edge from backedges
- invalidate_code_instance(replaced, max_world, depth);
+ jl_atomic_fetch_and_relaxed(&replaced_mi->flags, ~MI_FLAG_BACKEDGES_ALL);
+ }
+ JL_GC_PUSH1(&backedges);
+ size_t i = 0, l = jl_array_nrows(backedges);
+ size_t ins = 0;
+ jl_code_instance_t *replaced;
+ while (i < l) {
+ jl_value_t *invokesig = NULL;
+ i = get_next_edge(backedges, i, &invokesig, &replaced);
+ if (!replaced) {
+ ins = i;
+ continue;
+ }
+ JL_GC_PROMISE_ROOTED(replaced); // propagated by get_next_edge from backedges
+ if (replaced_ci) {
+ // If we're invalidating a particular codeinstance, only invalidate
+ // this backedge if it actually has an edge for our codeinstance.
+ jl_svec_t *edges = jl_atomic_load_relaxed(&replaced->edges);
+ for (size_t j = 0; j < jl_svec_len(edges); ++j) {
+ jl_value_t *edge = jl_svecref(edges, j);
+ if (edge == (jl_value_t*)replaced_mi || edge == (jl_value_t*)replaced_ci)
+ goto found;
+ }
+ ins = set_next_edge(backedges, ins, invokesig, replaced);
+ continue;
+ found:;
+ ins = clear_next_edge(backedges, ins, invokesig, replaced);
+ jl_atomic_fetch_or(&replaced_mi->flags, MI_FLAG_BACKEDGES_DIRTY);
+ /* fallthrough */
+ }
+ invalidate_code_instance(replaced, max_world, depth);
+ if (replaced_ci && !replaced_mi->backedges) {
+ // Fast-path early out. If `invalidate_code_instance` invalidated
+ // the entire mi via a recursive edge, there's no point to keep
+ // iterating - they'll already have been invalidated.
+ break;
+ }
+ }
+ if (replaced_ci)
+ jl_mi_done_backedges(replaced_mi, recursion_flags);
+ JL_GC_POP();
+}
+
+enum morespec_options {
+ morespec_unknown,
+ morespec_isnot,
+ morespec_is
+};
+
+// check if `type` is replacing `m` with an ambiguity here, given other methods in `d` that already match it
+static int is_replacing(char ambig, jl_value_t *type, jl_method_t *m, jl_method_t *const *d, size_t n, jl_value_t *isect, jl_value_t *isect2, char *morespec)
+{
+ size_t k;
+ for (k = 0; k < n; k++) {
+ jl_method_t *m2 = d[k];
+ // see if m2 also fully covered this intersection
+ if (m == m2 || !(jl_subtype(isect, m2->sig) || (isect2 && jl_subtype(isect2, m2->sig))))
+ continue;
+ if (morespec[k] == (char)morespec_unknown)
+ morespec[k] = (char)(jl_type_morespecific(m2->sig, type) ? morespec_is : morespec_isnot);
+ if (morespec[k] == (char)morespec_is)
+ // not actually shadowing this--m2 will still be better
+ return 0;
+ // if type is not more specific than m (thus now dominating it)
+ // then there is a new ambiguity here,
+ // since m2 was also a previous match over isect,
+ // see if m was previously dominant over all m2
+ // or if this was already ambiguous before
+ if (ambig == morespec_is && !jl_type_morespecific(m->sig, m2->sig)) {
+ // m and m2 were previously ambiguous over the full intersection of mi with type, and will still be ambiguous with addition of type
+ return 0;
}
- JL_GC_POP();
}
+ return 1;
+}
+
+static int _invalidate_dispatch_backedges(jl_method_instance_t *mi, jl_value_t *type, jl_method_t *m,
+ jl_method_t *const *d, size_t n, int replaced_dispatch, int ambig,
+ size_t max_world, char *morespec)
+{
+ uint8_t backedge_recursion_flags = 0;
+ jl_array_t *backedges = jl_mi_get_backedges_mutate(mi, &backedge_recursion_flags);
+ if (!backedges)
+ return 0;
+ size_t ib = 0, insb = 0, nb = jl_array_nrows(backedges);
+ jl_value_t *invokeTypes;
+ jl_code_instance_t *caller;
+ int invalidated_any = 0;
+ while (mi->backedges && ib < nb) {
+ ib = get_next_edge(backedges, ib, &invokeTypes, &caller);
+ if (!caller) {
+ insb = ib;
+ continue;
+ }
+ JL_GC_PROMISE_ROOTED(caller); // propagated by get_next_edge from backedges
+ int replaced_edge;
+ if (invokeTypes) {
+ // n.b. normally we must have mi.specTypes <: invokeTypes <: m.sig (though it might not strictly hold), so we only need to check the other subtypes
+ if (jl_egal(invokeTypes, jl_get_ci_mi(caller)->def.method->sig))
+ replaced_edge = 0; // if invokeTypes == m.sig, then the only way to change this invoke is to replace the method itself
+ else
+ replaced_edge = jl_subtype(invokeTypes, type) && is_replacing(ambig, type, m, d, n, invokeTypes, NULL, morespec);
+ }
+ else {
+ replaced_edge = replaced_dispatch;
+ }
+ if (replaced_edge) {
+ invalidate_code_instance(caller, max_world, 1);
+ insb = clear_next_edge(backedges, insb, invokeTypes, caller);
+ jl_atomic_fetch_or(&mi->flags, MI_FLAG_BACKEDGES_DIRTY);
+ invalidated_any = 1;
+ }
+ else {
+ insb = set_next_edge(backedges, insb, invokeTypes, caller);
+ }
+ }
+ jl_mi_done_backedges(mi, backedge_recursion_flags);
+ return invalidated_any;
}
// invalidate cached methods that overlap this definition
static void invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_world, const char *why)
{
JL_LOCK(&replaced_mi->def.method->writelock);
- _invalidate_backedges(replaced_mi, max_world, 1);
+ _invalidate_backedges(replaced_mi, NULL, max_world, 1);
JL_UNLOCK(&replaced_mi->def.method->writelock);
if (why && _jl_debug_method_invalidation) {
jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)replaced_mi);
@@ -1918,20 +2007,22 @@ JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee,
JL_LOCK(&callee->def.method->writelock);
if (jl_atomic_load_relaxed(&allow_new_worlds)) {
int found = 0;
+ jl_array_t *backedges = jl_mi_get_backedges(callee);
// TODO: use jl_cache_type_(invokesig) like cache_method does to save memory
- if (!callee->backedges) {
+ if (!backedges) {
// lazy-init the backedges array
- callee->backedges = jl_alloc_vec_any(0);
- jl_gc_wb(callee, callee->backedges);
+ backedges = jl_alloc_vec_any(0);
+ callee->backedges = backedges;
+ jl_gc_wb(callee, backedges);
}
else {
- size_t i = 0, l = jl_array_nrows(callee->backedges);
+ size_t i = 0, l = jl_array_nrows(backedges);
for (i = 0; i < l; i++) {
// optimized version of while (i < l) i = get_next_edge(callee->backedges, i, &invokeTypes, &mi);
- jl_value_t *mi = jl_array_ptr_ref(callee->backedges, i);
- if (mi != (jl_value_t*)caller)
+ jl_value_t *ciedge = jl_array_ptr_ref(backedges, i);
+ if (ciedge != (jl_value_t*)caller)
continue;
- jl_value_t *invokeTypes = i > 0 ? jl_array_ptr_ref(callee->backedges, i - 1) : NULL;
+ jl_value_t *invokeTypes = i > 0 ? jl_array_ptr_ref(backedges, i - 1) : NULL;
if (invokeTypes && jl_is_method_instance(invokeTypes))
invokeTypes = NULL;
if ((invokesig == NULL && invokeTypes == NULL) ||
@@ -1942,7 +2033,7 @@ JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee,
}
}
if (!found)
- push_edge(callee->backedges, invokesig, caller);
+ push_edge(backedges, invokesig, caller);
}
JL_UNLOCK(&callee->def.method->writelock);
}
@@ -2131,13 +2222,13 @@ static int erase_method_backedges(jl_typemap_entry_t *def, void *closure)
for (i = 0; i < l; i++) {
jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i);
if ((jl_value_t*)mi != jl_nothing) {
- mi->backedges = NULL;
+ mi->backedges = 0;
}
}
}
else {
jl_method_instance_t *mi = (jl_method_instance_t*)specializations;
- mi->backedges = NULL;
+ mi->backedges = 0;
}
JL_UNLOCK(&method->writelock);
return 1;
@@ -2170,17 +2261,22 @@ JL_DLLEXPORT void jl_method_table_disable(jl_methtable_t *mt, jl_method_t *metho
JL_LOCK(&world_counter_lock);
if (!jl_atomic_load_relaxed(&allow_new_worlds))
jl_error("Method changes have been disabled via a call to disable_new_worlds.");
- JL_LOCK(&mt->writelock);
- // Narrow the world age on the method to make it uncallable
- size_t world = jl_atomic_load_relaxed(&jl_world_counter);
- assert(method == methodentry->func.method);
- assert(jl_atomic_load_relaxed(&method->deleted_world) == ~(size_t)0);
- jl_atomic_store_relaxed(&method->deleted_world, world);
- jl_atomic_store_relaxed(&methodentry->max_world, world);
- jl_method_table_invalidate(mt, method, world);
- jl_atomic_store_release(&jl_world_counter, world + 1);
- JL_UNLOCK(&mt->writelock);
+ int enabled = jl_atomic_load_relaxed(&methodentry->max_world) == ~(size_t)0;
+ if (enabled) {
+ JL_LOCK(&mt->writelock);
+ // Narrow the world age on the method to make it uncallable
+ size_t world = jl_atomic_load_relaxed(&jl_world_counter);
+ assert(method == methodentry->func.method);
+ jl_atomic_store_relaxed(&method->dispatch_status, 0);
+ assert(jl_atomic_load_relaxed(&methodentry->max_world) == ~(size_t)0);
+ jl_atomic_store_relaxed(&methodentry->max_world, world);
+ jl_method_table_invalidate(mt, method, world);
+ jl_atomic_store_release(&jl_world_counter, world + 1);
+ JL_UNLOCK(&mt->writelock);
+ }
JL_UNLOCK(&world_counter_lock);
+ if (!enabled)
+ jl_errorf("Method of %s already disabled", jl_symbol_name(method->name));
}
static int jl_type_intersection2(jl_value_t *t1, jl_value_t *t2, jl_value_t **isect JL_REQUIRE_ROOTED_SLOT, jl_value_t **isect2 JL_REQUIRE_ROOTED_SLOT)
@@ -2209,39 +2305,6 @@ static int jl_type_intersection2(jl_value_t *t1, jl_value_t *t2, jl_value_t **is
return 1;
}
-enum morespec_options {
- morespec_unknown,
- morespec_isnot,
- morespec_is
-};
-
-// check if `type` is replacing `m` with an ambiguity here, given other methods in `d` that already match it
-static int is_replacing(char ambig, jl_value_t *type, jl_method_t *m, jl_method_t *const *d, size_t n, jl_value_t *isect, jl_value_t *isect2, char *morespec)
-{
- size_t k;
- for (k = 0; k < n; k++) {
- jl_method_t *m2 = d[k];
- // see if m2 also fully covered this intersection
- if (m == m2 || !(jl_subtype(isect, m2->sig) || (isect2 && jl_subtype(isect2, m2->sig))))
- continue;
- if (morespec[k] == (char)morespec_unknown)
- morespec[k] = (char)(jl_type_morespecific(m2->sig, type) ? morespec_is : morespec_isnot);
- if (morespec[k] == (char)morespec_is)
- // not actually shadowing this--m2 will still be better
- return 0;
- // if type is not more specific than m (thus now dominating it)
- // then there is a new ambiguity here,
- // since m2 was also a previous match over isect,
- // see if m was previously dominant over all m2
- // or if this was already ambiguous before
- if (ambig != morespec_is && !jl_type_morespecific(m->sig, m2->sig)) {
- // m and m2 were previously ambiguous over the full intersection of mi with type, and will still be ambiguous with addition of type
- return 0;
- }
- }
- return 1;
-}
-
jl_typemap_entry_t *jl_method_table_add(jl_methtable_t *mt, jl_method_t *method, jl_tupletype_t *simpletype)
{
JL_TIMING(ADD_METHOD, ADD_METHOD);
@@ -2253,9 +2316,9 @@ jl_typemap_entry_t *jl_method_table_add(jl_methtable_t *mt, jl_method_t *method,
JL_LOCK(&mt->writelock);
// add our new entry
assert(jl_atomic_load_relaxed(&method->primary_world) == ~(size_t)0); // min-world
- assert(jl_atomic_load_relaxed(&method->deleted_world) == 1); // max-world
- newentry = jl_typemap_alloc((jl_tupletype_t*)method->sig, simpletype, jl_emptysvec, (jl_value_t*)method,
- jl_atomic_load_relaxed(&method->primary_world), jl_atomic_load_relaxed(&method->deleted_world));
+ assert((jl_atomic_load_relaxed(&method->dispatch_status) & METHOD_SIG_LATEST_WHICH) == 0);
+ assert((jl_atomic_load_relaxed(&method->dispatch_status) & METHOD_SIG_LATEST_ONLY) == 0);
+ newentry = jl_typemap_alloc((jl_tupletype_t*)method->sig, simpletype, jl_emptysvec, (jl_value_t*)method, ~(size_t)0, 1);
jl_typemap_insert(&mt->defs, (jl_value_t*)mt, newentry, jl_cachearg_offset(mt));
update_max_args(mt, method->sig);
JL_UNLOCK(&mt->writelock);
@@ -2276,7 +2339,8 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry)
JL_LOCK(&mt->writelock);
size_t world = jl_atomic_load_relaxed(&method->primary_world);
assert(world == jl_atomic_load_relaxed(&jl_world_counter) + 1); // min-world
- assert(jl_atomic_load_relaxed(&method->deleted_world) == ~(size_t)0); // max-world
+ assert((jl_atomic_load_relaxed(&method->dispatch_status) & METHOD_SIG_LATEST_WHICH) == 0);
+ assert((jl_atomic_load_relaxed(&method->dispatch_status) & METHOD_SIG_LATEST_ONLY) == 0);
assert(jl_atomic_load_relaxed(&newentry->min_world) == ~(size_t)0);
assert(jl_atomic_load_relaxed(&newentry->max_world) == 1);
jl_atomic_store_relaxed(&newentry->min_world, world);
@@ -2291,12 +2355,17 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry)
// then check what entries we replaced
oldvalue = get_intersect_matches(jl_atomic_load_relaxed(&mt->defs), newentry, &replaced, jl_cachearg_offset(mt), max_world);
int invalidated = 0;
+ int only = !(jl_atomic_load_relaxed(&method->dispatch_status) & METHOD_SIG_PRECOMPILE_MANY); // will compute whether this will currently be the only result that would be returned from `ml_matches` given `sig`
if (replaced) {
oldvalue = (jl_value_t*)replaced;
+ jl_method_t *m = replaced->func.method;
invalidated = 1;
- method_overwrite(newentry, replaced->func.method);
+ method_overwrite(newentry, m);
// this is an optimized version of below, given we know the type-intersection is exact
- jl_method_table_invalidate(mt, replaced->func.method, max_world);
+ jl_method_table_invalidate(mt, m, max_world);
+ int m_dispatch = jl_atomic_load_relaxed(&m->dispatch_status);
+ jl_atomic_store_relaxed(&m->dispatch_status, 0);
+ only = m_dispatch & METHOD_SIG_LATEST_ONLY;
}
else {
jl_method_t *const *d;
@@ -2368,11 +2437,13 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry)
memset(morespec, morespec_unknown, n);
for (j = 0; j < n; j++) {
jl_method_t *m = d[j];
- if (morespec[j] == (char)morespec_is)
+ if (morespec[j] == (char)morespec_is) {
+ only = 0;
continue;
+ }
loctag = jl_atomic_load_relaxed(&m->specializations); // use loctag for a gcroot
_Atomic(jl_method_instance_t*) *data;
- size_t i, l;
+ size_t l;
if (jl_is_svec(loctag)) {
data = (_Atomic(jl_method_instance_t*)*)jl_svec_data(loctag);
l = jl_svec_len(loctag);
@@ -2382,7 +2453,7 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry)
l = 1;
}
enum morespec_options ambig = morespec_unknown;
- for (i = 0; i < l; i++) {
+ for (size_t i = 0; i < l; i++) {
jl_method_instance_t *mi = jl_atomic_load_relaxed(&data[i]);
if ((jl_value_t*)mi == jl_nothing)
continue;
@@ -2399,48 +2470,35 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry)
// not actually shadowing--the existing method is still better
break;
if (ambig == morespec_unknown)
- ambig = jl_type_morespecific(type, m->sig) ? morespec_is : morespec_isnot;
+ ambig = jl_type_morespecific(type, m->sig) ? morespec_isnot : morespec_is;
// replacing a method--see if this really was the selected method previously
// over the intersection (not ambiguous) and the new method will be selected now (morespec_is)
int replaced_dispatch = is_replacing(ambig, type, m, d, n, isect, isect2, morespec);
// found that this specialization dispatch got replaced by m
// call invalidate_backedges(mi, max_world, "jl_method_table_insert");
// but ignore invoke-type edges
- jl_array_t *backedges = mi->backedges;
- if (backedges) {
- size_t ib = 0, insb = 0, nb = jl_array_nrows(backedges);
- jl_value_t *invokeTypes;
- jl_code_instance_t *caller;
- while (ib < nb) {
- ib = get_next_edge(backedges, ib, &invokeTypes, &caller);
- JL_GC_PROMISE_ROOTED(caller); // propagated by get_next_edge from backedges
- int replaced_edge;
- if (invokeTypes) {
- // n.b. normally we must have mi.specTypes <: invokeTypes <: m.sig (though it might not strictly hold), so we only need to check the other subtypes
- if (jl_egal(invokeTypes, jl_get_ci_mi(caller)->def.method->sig))
- replaced_edge = 0; // if invokeTypes == m.sig, then the only way to change this invoke is to replace the method itself
- else
- replaced_edge = jl_subtype(invokeTypes, type) && is_replacing(ambig, type, m, d, n, invokeTypes, NULL, morespec);
- }
- else {
- replaced_edge = replaced_dispatch;
- }
- if (replaced_edge) {
- invalidate_code_instance(caller, max_world, 1);
- invalidated = 1;
- }
- else {
- insb = set_next_edge(backedges, insb, invokeTypes, caller);
- }
- }
- jl_array_del_end(backedges, nb - insb);
- }
+ int invalidatedmi = _invalidate_dispatch_backedges(mi, type, m, d, n, replaced_dispatch, ambig, max_world, morespec);
jl_array_ptr_1d_push(oldmi, (jl_value_t*)mi);
- if (_jl_debug_method_invalidation && invalidated) {
+ if (_jl_debug_method_invalidation && invalidatedmi) {
jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)mi);
loctag = jl_cstr_to_string("jl_method_table_insert");
jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag);
}
+ invalidated |= invalidatedmi;
+ }
+ }
+ // now compute and store updates to METHOD_SIG_LATEST_ONLY
+ int m_dispatch = jl_atomic_load_relaxed(&m->dispatch_status);
+ if (m_dispatch & METHOD_SIG_LATEST_ONLY) {
+ if (morespec[j] == (char)morespec_unknown)
+ morespec[j] = (char)(jl_type_morespecific(m->sig, type) ? morespec_is : morespec_isnot);
+ if (morespec[j] == (char)morespec_isnot)
+ jl_atomic_store_relaxed(&m->dispatch_status, ~METHOD_SIG_LATEST_ONLY & m_dispatch);
+ }
+ if (only) {
+ if (morespec[j] == (char)morespec_is || ambig == morespec_is ||
+ (ambig == morespec_unknown && !jl_type_morespecific(type, m->sig))) {
+ only = 0;
}
}
}
@@ -2473,7 +2531,8 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry)
loctag = jl_cstr_to_string("jl_method_table_insert");
jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag);
}
- jl_atomic_store_relaxed(&newentry->max_world, jl_atomic_load_relaxed(&method->deleted_world));
+ jl_atomic_store_relaxed(&newentry->max_world, ~(size_t)0);
+ jl_atomic_store_relaxed(&method->dispatch_status, METHOD_SIG_LATEST_WHICH | (only ? METHOD_SIG_LATEST_ONLY : 0)); // TODO: this should be sequenced fully after the world counter store
JL_UNLOCK(&mt->writelock);
JL_GC_POP();
}
@@ -2487,7 +2546,6 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
jl_error("Method changes have been disabled via a call to disable_new_worlds.");
size_t world = jl_atomic_load_relaxed(&jl_world_counter) + 1;
jl_atomic_store_relaxed(&method->primary_world, world);
- jl_atomic_store_relaxed(&method->deleted_world, ~(size_t)0);
jl_method_table_activate(mt, newentry);
jl_atomic_store_release(&jl_world_counter, world);
JL_UNLOCK(&world_counter_lock);
@@ -2815,10 +2873,9 @@ void jl_read_codeinst_invoke(jl_code_instance_t *ci, uint8_t *specsigflags, jl_c
jl_method_instance_t *jl_normalize_to_compilable_mi(jl_method_instance_t *mi JL_PROPAGATES_ROOT);
-JL_DLLEXPORT void jl_add_codeinst_to_jit(jl_code_instance_t *codeinst, jl_code_info_t *src)
+JL_DLLEXPORT void jl_add_codeinst_to_cache(jl_code_instance_t *codeinst, jl_code_info_t *src)
{
assert(jl_is_code_info(src));
- jl_emit_codeinst_to_jit(codeinst, src);
jl_method_instance_t *mi = jl_get_ci_mi(codeinst);
if (jl_generating_output() && jl_is_method(mi->def.method) && jl_atomic_load_relaxed(&codeinst->inferred) == jl_nothing) {
jl_value_t *compressed = jl_compress_ir(mi->def.method, src);
@@ -2834,6 +2891,14 @@ JL_DLLEXPORT void jl_add_codeinst_to_jit(jl_code_instance_t *codeinst, jl_code_i
}
}
+
+JL_DLLEXPORT void jl_add_codeinst_to_jit(jl_code_instance_t *codeinst, jl_code_info_t *src)
+{
+ assert(jl_is_code_info(src));
+ jl_emit_codeinst_to_jit(codeinst, src);
+ jl_add_codeinst_to_cache(codeinst, src);
+}
+
jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t world)
{
// quick check if we already have a compiled result
@@ -3165,6 +3230,7 @@ jl_method_instance_t *jl_normalize_to_compilable_mi(jl_method_instance_t *mi JL_
JL_DLLEXPORT jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *match, size_t world, size_t min_valid, size_t max_valid, int mt_cache)
{
jl_method_t *m = match->method;
+ JL_GC_PROMISE_ROOTED(m);
jl_svec_t *env = match->sparams;
jl_tupletype_t *ti = match->spec_types;
jl_method_instance_t *mi = NULL;
@@ -3200,7 +3266,8 @@ JL_DLLEXPORT jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *matc
}
// compile-time method lookup
-jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types JL_PROPAGATES_ROOT, size_t world, int mt_cache)
+// intersect types with the MT, and return a single compileable specialization that covers the intersection.
+jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types, size_t world, int mt_cache)
{
if (jl_has_free_typevars((jl_value_t*)types))
return NULL; // don't poison the cache due to a malformed query
@@ -3596,9 +3663,9 @@ STATIC_INLINE jl_method_instance_t *jl_lookup_generic_(jl_value_t *F, jl_value_t
mt = jl_gf_mtable(F);
jl_genericmemory_t *leafcache = jl_atomic_load_relaxed(&mt->leafcache);
entry = NULL;
- if (leafcache != (jl_genericmemory_t*)jl_an_empty_memory_any &&
- jl_typetagis(jl_atomic_load_relaxed(&mt->cache), jl_typemap_level_type)) {
- // hashing args is expensive, but looking at mt->cache is probably even more expensive
+ int cache_entry_count = jl_atomic_load_relaxed(&((jl_datatype_t*)FT)->name->cache_entry_count);
+ if (leafcache != (jl_genericmemory_t*)jl_an_empty_memory_any && (cache_entry_count == 0 || cache_entry_count >= 8)) {
+ // hashing args is expensive, so only do that if looking at mt->cache is probably even more expensive
tt = lookup_arg_type_tuple(F, args, nargs);
if (tt != NULL)
entry = lookup_leafcache(leafcache, (jl_value_t*)tt, world);
@@ -3815,7 +3882,7 @@ jl_function_t *jl_new_generic_function_with_supertype(jl_sym_t *name, jl_module_
JL_GC_PUSH1(&ftype);
ftype->name->mt->name = name;
jl_gc_wb(ftype->name->mt, name);
- jl_declare_constant_val3(NULL, module, tname, (jl_value_t*)ftype, BINDING_KIND_CONST, new_world);
+ jl_declare_constant_val3(NULL, module, tname, (jl_value_t*)ftype, PARTITION_KIND_CONST, new_world);
jl_value_t *f = jl_new_struct(ftype);
ftype->instance = f;
jl_gc_wb(ftype, f);
@@ -3876,31 +3943,29 @@ static int ml_matches_visitor(jl_typemap_entry_t *ml, struct typemap_intersectio
closure->match.min_valid = max_world + 1;
return 1;
}
+ if (closure->match.max_valid > max_world)
+ closure->match.max_valid = max_world;
jl_method_t *meth = ml->func.method;
- if (closure->lim >= 0 && jl_is_dispatch_tupletype(meth->sig)) {
- int replaced = 0;
- // check if this is replaced, in which case we need to avoid double-counting it against the limit
- // (although it will figure out later which one to keep and return)
- size_t len = jl_array_nrows(closure->t);
- for (int i = 0; i < len; i++) {
- if (jl_types_equal(((jl_method_match_t*)jl_array_ptr_ref(closure->t, i))->method->sig, meth->sig)) {
- replaced = 1;
- break;
- }
- }
- if (!replaced) {
- if (closure->lim == 0)
- return 0;
- closure->lim--;
+ int only = jl_atomic_load_relaxed(&meth->dispatch_status) & METHOD_SIG_LATEST_ONLY;
+ if (closure->lim >= 0 && only) {
+ if (closure->lim == 0) {
+ closure->t = jl_an_empty_vec_any;
+ return 0;
}
+ closure->lim--;
}
- // don't need to consider other similar methods if this ml will always fully intersect with them and dominates all of them
- if (!closure->include_ambiguous || closure->lim != -1)
- typemap_slurp_search(ml, &closure->match);
closure->matc = make_method_match((jl_tupletype_t*)closure->match.ti,
closure->match.env, meth,
closure->match.issubty ? FULLY_COVERS : NOT_FULLY_COVERS);
size_t len = jl_array_nrows(closure->t);
+ if (closure->match.issubty && only) {
+ if (len == 0)
+ closure->t = (jl_value_t*)jl_alloc_vec_any(1);
+ else if (len > 1)
+ jl_array_del_end((jl_array_t*)closure->t, len - 1);
+ jl_array_ptr_set(closure->t, 0, (jl_value_t*)closure->matc);
+ return 0;
+ }
if (len == 0) {
closure->t = (jl_value_t*)jl_alloc_vec_any(1);
jl_array_ptr_set(closure->t, 0, (jl_value_t*)closure->matc);
@@ -3908,6 +3973,9 @@ static int ml_matches_visitor(jl_typemap_entry_t *ml, struct typemap_intersectio
else {
jl_array_ptr_1d_push((jl_array_t*)closure->t, (jl_value_t*)closure->matc);
}
+ // don't need to consider other similar methods if this ml will always fully intersect with them and dominates all of them
+ if (!closure->include_ambiguous || closure->lim != -1)
+ typemap_slurp_search(ml, &closure->match);
return 1;
}
@@ -4288,9 +4356,9 @@ static jl_value_t *ml_matches(jl_methtable_t *mt,
return env.t;
}
}
- if (!ml_mtable_visitor(mt, &env.match)) {
+ if (!ml_mtable_visitor(mt, &env.match) && env.t == jl_an_empty_vec_any) {
JL_GC_POP();
- // if we return early, set only the min/max valid collected from matching
+ // if we return early without returning methods, set only the min/max valid collected from matching
*min_valid = env.match.min_valid;
*max_valid = env.match.max_valid;
return jl_nothing;
@@ -4298,9 +4366,9 @@ static jl_value_t *ml_matches(jl_methtable_t *mt,
}
else {
// else: scan everything
- if (!jl_foreach_reachable_mtable(ml_mtable_visitor, &env.match)) {
+ if (!jl_foreach_reachable_mtable(ml_mtable_visitor, &env.match) && env.t == jl_an_empty_vec_any) {
JL_GC_POP();
- // if we return early, set only the min/max valid collected from matching
+ // if we return early without returning methods, set only the min/max valid collected from matching
*min_valid = env.match.min_valid;
*max_valid = env.match.max_valid;
return jl_nothing;
@@ -4548,12 +4616,9 @@ static jl_value_t *ml_matches(jl_methtable_t *mt,
jl_method_t *m = matc->method;
// method applicability is the same as typemapentry applicability
size_t min_world = jl_atomic_load_relaxed(&m->primary_world);
- size_t max_world = jl_atomic_load_relaxed(&m->deleted_world);
// intersect the env valid range with method lookup's inclusive valid range
if (env.match.min_valid < min_world)
env.match.min_valid = min_world;
- if (env.match.max_valid > max_world)
- env.match.max_valid = max_world;
}
if (mt && cache_result && ((jl_datatype_t*)unw)->isdispatchtuple) { // cache_result parameter keeps this from being recursive
if (len == 1 && !has_ambiguity) {
@@ -4614,6 +4679,47 @@ JL_DLLEXPORT void jl_typeinf_timing_end(uint64_t start, int is_recompile)
}
}
+// declare a C-callable entry point; called during code loading from the toplevel
+JL_DLLEXPORT void jl_extern_c(jl_value_t *name, jl_value_t *declrt, jl_tupletype_t *sigt)
+{
+ // validate arguments. try to do as many checks as possible here to avoid
+ // throwing errors later during codegen.
+ JL_TYPECHK(@ccallable, type, declrt);
+ if (!jl_is_tuple_type(sigt))
+ jl_type_error("@ccallable", (jl_value_t*)jl_anytuple_type_type, (jl_value_t*)sigt);
+ // check that f is a guaranteed singleton type
+ jl_datatype_t *ft = (jl_datatype_t*)jl_tparam0(sigt);
+ if (!jl_is_datatype(ft) || !jl_is_datatype_singleton(ft))
+ jl_error("@ccallable: function object must be a singleton");
+
+ // compute / validate return type
+ if (!jl_is_concrete_type(declrt) || jl_is_kind(declrt))
+ jl_error("@ccallable: return type must be concrete and correspond to a C type");
+ if (!jl_type_mappable_to_c(declrt))
+ jl_error("@ccallable: return type doesn't correspond to a C type");
+
+ // validate method signature
+ size_t i, nargs = jl_nparams(sigt);
+ for (i = 1; i < nargs; i++) {
+ jl_value_t *ati = jl_tparam(sigt, i);
+ if (!jl_is_concrete_type(ati) || jl_is_kind(ati) || !jl_type_mappable_to_c(ati))
+ jl_error("@ccallable: argument types must be concrete");
+ }
+
+ // save a record of this so that the alias is generated when we write an object file
+ jl_method_t *meth = (jl_method_t*)jl_methtable_lookup(ft->name->mt, (jl_value_t*)sigt, jl_atomic_load_acquire(&jl_world_counter));
+ if (!jl_is_method(meth))
+ jl_error("@ccallable: could not find requested method");
+ JL_GC_PUSH1(&meth);
+ if (name == jl_nothing)
+ meth->ccallable = jl_svec2(declrt, (jl_value_t*)sigt);
+ else
+ meth->ccallable = jl_svec3(declrt, (jl_value_t*)sigt, name);
+ jl_gc_wb(meth, meth->ccallable);
+ JL_GC_POP();
+}
+
+
#ifdef __cplusplus
}
#endif
diff --git a/src/init.c b/src/init.c
index aada2c75ed7a6..94bc353eeb6c1 100644
--- a/src/init.c
+++ b/src/init.c
@@ -534,169 +534,6 @@ int jl_isabspath(const char *in) JL_NOTSAFEPOINT
return 0; // relative path
}
-static char *absrealpath(const char *in, int nprefix)
-{ // compute an absolute realpath location, so that chdir doesn't change the file reference
- // ignores (copies directly over) nprefix characters at the start of abspath
-#ifndef _OS_WINDOWS_
- char *out = realpath(in + nprefix, NULL);
- if (out) {
- if (nprefix > 0) {
- size_t sz = strlen(out) + 1;
- char *cpy = (char*)malloc_s(sz + nprefix);
- memcpy(cpy, in, nprefix);
- memcpy(cpy + nprefix, out, sz);
- free(out);
- out = cpy;
- }
- }
- else {
- size_t sz = strlen(in + nprefix) + 1;
- if (in[nprefix] == PATHSEPSTRING[0]) {
- out = (char*)malloc_s(sz + nprefix);
- memcpy(out, in, sz + nprefix);
- }
- else {
- size_t path_size = JL_PATH_MAX;
- char *path = (char*)malloc_s(JL_PATH_MAX);
- if (uv_cwd(path, &path_size)) {
- jl_error("fatal error: unexpected error while retrieving current working directory");
- }
- out = (char*)malloc_s(path_size + 1 + sz + nprefix);
- memcpy(out, in, nprefix);
- memcpy(out + nprefix, path, path_size);
- out[nprefix + path_size] = PATHSEPSTRING[0];
- memcpy(out + nprefix + path_size + 1, in + nprefix, sz);
- free(path);
- }
- }
-#else
- // GetFullPathName intentionally errors if given an empty string so manually insert `.` to invoke cwd
- char *in2 = (char*)malloc_s(JL_PATH_MAX);
- if (strlen(in) - nprefix == 0) {
- memcpy(in2, in, nprefix);
- in2[nprefix] = '.';
- in2[nprefix+1] = '\0';
- in = in2;
- }
- DWORD n = GetFullPathName(in + nprefix, 0, NULL, NULL);
- if (n <= 0) {
- jl_error("fatal error: jl_options.image_file path too long or GetFullPathName failed");
- }
- char *out = (char*)malloc_s(n + nprefix);
- DWORD m = GetFullPathName(in + nprefix, n, out + nprefix, NULL);
- if (n != m + 1) {
- jl_error("fatal error: jl_options.image_file path too long or GetFullPathName failed");
- }
- memcpy(out, in, nprefix);
- free(in2);
-#endif
- return out;
-}
-
-// create an absolute-path copy of the input path format string
-// formed as `joinpath(replace(pwd(), "%" => "%%"), in)`
-// unless `in` starts with `%`
-static const char *absformat(const char *in)
-{
- if (in[0] == '%' || jl_isabspath(in))
- return in;
- // get an escaped copy of cwd
- size_t path_size = JL_PATH_MAX;
- char path[JL_PATH_MAX];
- if (uv_cwd(path, &path_size)) {
- jl_error("fatal error: unexpected error while retrieving current working directory");
- }
- size_t sz = strlen(in) + 1;
- size_t i, fmt_size = 0;
- for (i = 0; i < path_size; i++)
- fmt_size += (path[i] == '%' ? 2 : 1);
- char *out = (char*)malloc_s(fmt_size + 1 + sz);
- fmt_size = 0;
- for (i = 0; i < path_size; i++) { // copy-replace pwd portion
- char c = path[i];
- out[fmt_size++] = c;
- if (c == '%')
- out[fmt_size++] = '%';
- }
- out[fmt_size++] = PATHSEPSTRING[0]; // path sep
- memcpy(out + fmt_size, in, sz); // copy over format, including nul
- return out;
-}
-
-static void jl_resolve_sysimg_location(JL_IMAGE_SEARCH rel)
-{
- // this function resolves the paths in jl_options to absolute file locations as needed
- // and it replaces the pointers to `julia_bindir`, `julia_bin`, `image_file`, and output file paths
- // it may fail, print an error, and exit(1) if any of these paths are longer than JL_PATH_MAX
- //
- // note: if you care about lost memory, you should call the appropriate `free()` function
- // on the original pointer for each `char*` you've inserted into `jl_options`, after
- // calling `julia_init()`
- char *free_path = (char*)malloc_s(JL_PATH_MAX);
- size_t path_size = JL_PATH_MAX;
- if (uv_exepath(free_path, &path_size)) {
- jl_error("fatal error: unexpected error while retrieving exepath");
- }
- if (path_size >= JL_PATH_MAX) {
- jl_error("fatal error: jl_options.julia_bin path too long");
- }
- jl_options.julia_bin = (char*)malloc_s(path_size + 1);
- memcpy((char*)jl_options.julia_bin, free_path, path_size);
- ((char*)jl_options.julia_bin)[path_size] = '\0';
- if (!jl_options.julia_bindir) {
- jl_options.julia_bindir = getenv("JULIA_BINDIR");
- if (!jl_options.julia_bindir) {
- jl_options.julia_bindir = dirname(free_path);
- }
- }
- if (jl_options.julia_bindir)
- jl_options.julia_bindir = absrealpath(jl_options.julia_bindir, 0);
- free(free_path);
- free_path = NULL;
- if (jl_options.image_file) {
- if (rel == JL_IMAGE_JULIA_HOME && !jl_isabspath(jl_options.image_file)) {
- // build time path, relative to JULIA_BINDIR
- free_path = (char*)malloc_s(JL_PATH_MAX);
- int n = snprintf(free_path, JL_PATH_MAX, "%s" PATHSEPSTRING "%s",
- jl_options.julia_bindir, jl_options.image_file);
- if (n >= JL_PATH_MAX || n < 0) {
- jl_error("fatal error: jl_options.image_file path too long");
- }
- jl_options.image_file = free_path;
- }
- if (jl_options.image_file)
- jl_options.image_file = absrealpath(jl_options.image_file, 0);
- if (free_path) {
- free(free_path);
- free_path = NULL;
- }
- }
- if (jl_options.outputo)
- jl_options.outputo = absrealpath(jl_options.outputo, 0);
- if (jl_options.outputji)
- jl_options.outputji = absrealpath(jl_options.outputji, 0);
- if (jl_options.outputbc)
- jl_options.outputbc = absrealpath(jl_options.outputbc, 0);
- if (jl_options.outputasm)
- jl_options.outputasm = absrealpath(jl_options.outputasm, 0);
- if (jl_options.machine_file)
- jl_options.machine_file = absrealpath(jl_options.machine_file, 0);
- if (jl_options.output_code_coverage)
- jl_options.output_code_coverage = absformat(jl_options.output_code_coverage);
- if (jl_options.tracked_path)
- jl_options.tracked_path = absrealpath(jl_options.tracked_path, 0);
-
- const char **cmdp = jl_options.cmds;
- if (cmdp) {
- for (; *cmdp; cmdp++) {
- const char *cmd = *cmdp;
- if (cmd[0] == 'L') {
- *cmdp = absrealpath(cmd, 1);
- }
- }
- }
-}
-
JL_DLLEXPORT int jl_is_file_tracked(jl_sym_t *path)
{
const char* path_ = jl_symbol_name(path);
@@ -722,8 +559,90 @@ static void restore_fp_env(void)
jl_error("Failed to configure floating point environment");
}
}
+static NOINLINE void _finish_jl_init_(jl_image_buf_t sysimage, jl_ptls_t ptls, jl_task_t *ct)
+{
+ JL_TIMING(JULIA_INIT, JULIA_INIT);
+
+ if (sysimage.kind == JL_IMAGE_KIND_SO)
+ jl_gc_notify_image_load(sysimage.data, sysimage.size);
+
+ if (jl_options.cpu_target == NULL)
+ jl_options.cpu_target = "native";
+
+ // Parse image, perform relocations, and init JIT targets, etc.
+ jl_image_t parsed_image = jl_init_processor_sysimg(sysimage, jl_options.cpu_target);
+
+ jl_init_codegen();
+ jl_init_common_symbols();
+
+ if (sysimage.kind != JL_IMAGE_KIND_NONE) {
+ // Load the .ji or .so sysimage
+ jl_restore_system_image(&parsed_image, sysimage);
+ } else {
+ // No sysimage provided, init a minimal environment
+ jl_init_types();
+ jl_global_roots_list = (jl_genericmemory_t*)jl_an_empty_memory_any;
+ jl_global_roots_keyset = (jl_genericmemory_t*)jl_an_empty_memory_any;
+ }
+
+ jl_init_flisp();
+ jl_init_serializer();
+
+ if (sysimage.kind == JL_IMAGE_KIND_NONE) {
+ jl_top_module = jl_core_module;
+ jl_init_intrinsic_functions();
+ jl_init_primitives();
+ jl_init_main_module();
+ jl_load(jl_core_module, "boot.jl");
+ jl_current_task->world_age = jl_atomic_load_acquire(&jl_world_counter);
+ post_boot_hooks();
+ }
+
+ if (jl_base_module == NULL) {
+ // nthreads > 1 requires code in Base
+ jl_atomic_store_relaxed(&jl_n_threads, 1);
+ jl_n_markthreads = 0;
+ jl_n_sweepthreads = 0;
+ jl_n_gcthreads = 0;
+ jl_n_threads_per_pool[JL_THREADPOOL_ID_INTERACTIVE] = 0;
+ jl_n_threads_per_pool[JL_THREADPOOL_ID_DEFAULT] = 1;
+ } else {
+ jl_current_task->world_age = jl_atomic_load_acquire(&jl_world_counter);
+ post_image_load_hooks();
+ }
+ jl_start_threads();
+ jl_start_gc_threads();
+ uv_barrier_wait(&thread_init_done);
+
+ if (jl_base_module != NULL) {
+ // requires code in Base
+ jl_init_heartbeat();
+ }
+
+ jl_gc_enable(1);
+
+ if ((sysimage.kind != JL_IMAGE_KIND_NONE) &&
+ (!jl_generating_output() || jl_options.incremental) && jl_module_init_order) {
+ jl_array_t *init_order = jl_module_init_order;
+ JL_GC_PUSH1(&init_order);
+ jl_module_init_order = NULL;
+ int i, l = jl_array_nrows(init_order);
+ for (i = 0; i < l; i++) {
+ jl_value_t *mod = jl_array_ptr_ref(init_order, i);
+ jl_module_run_initializer((jl_module_t*)mod);
+ }
+ JL_GC_POP();
+ }
+
+ if (jl_options.trim) {
+ jl_entrypoint_mis = (arraylist_t *)malloc_s(sizeof(arraylist_t));
+ arraylist_new(jl_entrypoint_mis, 0);
+ }
+
+ if (jl_options.handle_signals == JL_OPTIONS_HANDLE_SIGNALS_ON)
+ jl_install_sigint_handler();
+}
-static NOINLINE void _finish_julia_init(JL_IMAGE_SEARCH rel, jl_ptls_t ptls, jl_task_t *ct);
JL_DLLEXPORT int jl_default_debug_info_kind;
JL_DLLEXPORT jl_cgparams_t jl_default_cgparams = {
@@ -739,8 +658,8 @@ JL_DLLEXPORT jl_cgparams_t jl_default_cgparams = {
/* debug_info_level */ 0, // later jl_options.debug_level,
/* safepoint_on_entry */ 1,
/* gcstack_arg */ 1,
- /* use_jlplt*/ 1,
- /* trim */ 0 };
+ /* use_jlplt*/ 1 ,
+ /*force_emit_all=*/ 0};
static void init_global_mutexes(void) {
JL_MUTEX_INIT(&jl_modules_mutex, "jl_modules_mutex");
@@ -751,12 +670,12 @@ static void init_global_mutexes(void) {
JL_MUTEX_INIT(&profile_show_peek_cond_lock, "profile_show_peek_cond_lock");
}
-JL_DLLEXPORT void julia_init(JL_IMAGE_SEARCH rel)
+JL_DLLEXPORT void jl_init_(jl_image_buf_t sysimage)
{
// initialize many things, in no particular order
// but generally running from simple platform things to optional
// configuration features
- jl_init_timing();
+
// Make sure we finalize the tls callback before starting any threads.
(void)jl_get_pgcstack();
@@ -808,8 +727,8 @@ JL_DLLEXPORT void julia_init(JL_IMAGE_SEARCH rel)
void *stack_lo, *stack_hi;
jl_init_stack_limits(1, &stack_lo, &stack_hi);
- jl_libjulia_internal_handle = jl_find_dynamic_library_by_addr(&jl_load_dynamic_library);
- jl_libjulia_handle = jl_find_dynamic_library_by_addr(&jl_any_type);
+ jl_libjulia_internal_handle = jl_find_dynamic_library_by_addr(&jl_load_dynamic_library, /* throw_err */ 1);
+ jl_libjulia_handle = jl_find_dynamic_library_by_addr(&jl_any_type, /* throw_err */ 1);
#ifdef _OS_WINDOWS_
jl_exe_handle = GetModuleHandleA(NULL);
jl_RTLD_DEFAULT_handle = jl_libjulia_internal_handle;
@@ -838,6 +757,15 @@ JL_DLLEXPORT void julia_init(JL_IMAGE_SEARCH rel)
if (jl_options.handle_signals == JL_OPTIONS_HANDLE_SIGNALS_ON)
jl_install_default_signal_handlers();
+#if (defined(_OS_LINUX_) && defined(_CPU_X86_64_)) || (defined(_OS_DARWIN_) && defined(_CPU_AARCH64_))
+ if (jl_options.safe_crash_log_file != NULL) {
+ jl_sig_fd = open(jl_options.safe_crash_log_file, O_WRONLY | O_CREAT | O_APPEND, 0600);
+ if (jl_sig_fd == -1) {
+ jl_error("fatal error: could not open safe crash log file for writing");
+ }
+ }
+#endif
+
jl_gc_init();
arraylist_new(&jl_linkage_blobs, 0);
@@ -861,83 +789,7 @@ JL_DLLEXPORT void julia_init(JL_IMAGE_SEARCH rel)
jl_task_t *ct = jl_init_root_task(ptls, stack_lo, stack_hi);
#pragma GCC diagnostic pop
JL_GC_PROMISE_ROOTED(ct);
- _finish_julia_init(rel, ptls, ct);
-}
-
-static NOINLINE void _finish_julia_init(JL_IMAGE_SEARCH rel, jl_ptls_t ptls, jl_task_t *ct)
-{
- JL_TIMING(JULIA_INIT, JULIA_INIT);
- jl_resolve_sysimg_location(rel);
- // loads sysimg if available, and conditionally sets jl_options.cpu_target
- if (rel == JL_IMAGE_IN_MEMORY) {
- jl_set_sysimg_so(jl_exe_handle);
- jl_options.image_file = jl_options.julia_bin;
- }
- else if (jl_options.image_file)
- jl_preload_sysimg_so(jl_options.image_file);
- if (jl_options.cpu_target == NULL)
- jl_options.cpu_target = "native";
- jl_init_codegen();
-
- jl_init_common_symbols();
- if (jl_options.image_file) {
- jl_restore_system_image(jl_options.image_file);
- } else {
- jl_init_types();
- jl_global_roots_list = (jl_genericmemory_t*)jl_an_empty_memory_any;
- jl_global_roots_keyset = (jl_genericmemory_t*)jl_an_empty_memory_any;
- }
-
- jl_init_flisp();
- jl_init_serializer();
-
- if (!jl_options.image_file) {
- jl_top_module = jl_core_module;
- jl_init_intrinsic_functions();
- jl_init_primitives();
- jl_init_main_module();
- jl_load(jl_core_module, "boot.jl");
- jl_current_task->world_age = jl_atomic_load_acquire(&jl_world_counter);
- post_boot_hooks();
- }
-
- if (jl_base_module == NULL) {
- // nthreads > 1 requires code in Base
- jl_atomic_store_relaxed(&jl_n_threads, 1);
- jl_n_markthreads = 0;
- jl_n_sweepthreads = 0;
- jl_n_gcthreads = 0;
- jl_n_threads_per_pool[JL_THREADPOOL_ID_INTERACTIVE] = 0;
- jl_n_threads_per_pool[JL_THREADPOOL_ID_DEFAULT] = 1;
- } else {
- jl_current_task->world_age = jl_atomic_load_acquire(&jl_world_counter);
- post_image_load_hooks();
- }
- jl_start_threads();
- jl_start_gc_threads();
- uv_barrier_wait(&thread_init_done);
-
- jl_gc_enable(1);
-
- if (jl_options.image_file && (!jl_generating_output() || jl_options.incremental) && jl_module_init_order) {
- jl_array_t *init_order = jl_module_init_order;
- JL_GC_PUSH1(&init_order);
- jl_module_init_order = NULL;
- int i, l = jl_array_nrows(init_order);
- for (i = 0; i < l; i++) {
- jl_value_t *mod = jl_array_ptr_ref(init_order, i);
- jl_module_run_initializer((jl_module_t*)mod);
- }
- JL_GC_POP();
- }
-
- if (jl_options.trim) {
- jl_entrypoint_mis = (arraylist_t *)malloc_s(sizeof(arraylist_t));
- arraylist_new(jl_entrypoint_mis, 0);
- }
-
- if (jl_options.handle_signals == JL_OPTIONS_HANDLE_SIGNALS_ON)
- jl_install_sigint_handler();
+ _finish_jl_init_(sysimage, ptls, ct);
}
#ifdef __cplusplus
diff --git a/src/interpreter.c b/src/interpreter.c
index 35c70a9ead2f1..7ab284df78dff 100644
--- a/src/interpreter.c
+++ b/src/interpreter.c
@@ -93,8 +93,7 @@ static jl_value_t *eval_methoddef(jl_expr_t *ex, interpreter_state *s)
if (!jl_is_symbol(fname)) {
jl_error("method: invalid declaration");
}
- jl_binding_t *b = jl_get_binding_for_method_def(modu, fname);
- return jl_declare_const_gf(b, modu, fname);
+ return jl_declare_const_gf(modu, fname);
}
jl_value_t *atypes = NULL, *meth = NULL, *fname = NULL;
@@ -634,9 +633,12 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, size_t ip,
s->locals[jl_source_nslots(s->src) + s->ip] = res;
}
else if (head == jl_globaldecl_sym) {
- jl_value_t *val = eval_value(jl_exprarg(stmt, 1), s);
- s->locals[jl_source_nslots(s->src) + s->ip] = val; // temporarily root
- jl_declare_global(s->module, jl_exprarg(stmt, 0), val);
+ jl_value_t *val = NULL;
+ if (jl_expr_nargs(stmt) >= 2) {
+ val = eval_value(jl_exprarg(stmt, 1), s);
+ s->locals[jl_source_nslots(s->src) + s->ip] = val; // temporarily root
+ }
+ jl_declare_global(s->module, jl_exprarg(stmt, 0), val, 1);
s->locals[jl_source_nslots(s->src) + s->ip] = jl_nothing;
}
else if (head == jl_const_sym) {
diff --git a/src/intrinsics.cpp b/src/intrinsics.cpp
index e4dc2459e8db6..563ce2fc1270c 100644
--- a/src/intrinsics.cpp
+++ b/src/intrinsics.cpp
@@ -454,6 +454,7 @@ static Value *emit_unbox(jl_codectx_t &ctx, Type *to, const jl_cgval_t &x, jl_va
Constant *c = x.constant ? julia_const_to_llvm(ctx, x.constant) : nullptr;
if ((x.inline_roots.empty() && !x.ispointer()) || c != nullptr) { // already unboxed, but sometimes need conversion
Value *unboxed = c ? c : x.V;
+ assert(unboxed); // clang-sa doesn't know that !x.ispointer() implies x.V does have a value
return emit_unboxed_coercion(ctx, to, unboxed);
}
@@ -461,6 +462,7 @@ static Value *emit_unbox(jl_codectx_t &ctx, Type *to, const jl_cgval_t &x, jl_va
Value *p = x.constant ? literal_pointer_val(ctx, x.constant) : x.V;
if (jt == (jl_value_t*)jl_bool_type || to->isIntegerTy(1)) {
+ assert(p && x.inline_roots.empty()); // clang-sa doesn't know that x.ispointer() implied these are true
jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, x.tbaa);
Instruction *unbox_load = ai.decorateInst(ctx.builder.CreateLoad(getInt8Ty(ctx.builder.getContext()), p));
setName(ctx.emission_context, unbox_load, p->getName() + ".unbox");
@@ -486,13 +488,14 @@ static Value *emit_unbox(jl_codectx_t &ctx, Type *to, const jl_cgval_t &x, jl_va
p = combined;
ai = combined_ai;
}
+ assert(p); // clang-sa doesn't know that x.ispointer() implied this is true
Instruction *load = ctx.builder.CreateAlignedLoad(to, p, Align(alignment));
setName(ctx.emission_context, load, p->getName() + ".unbox");
return ai.decorateInst(load);
}
// emit code to store a raw value into a destination
-static void emit_unbox_store(jl_codectx_t &ctx, const jl_cgval_t &x, Value *dest, MDNode *tbaa_dest, Align alignment, bool isVolatile)
+static void emit_unbox_store(jl_codectx_t &ctx, const jl_cgval_t &x, Value *dest, MDNode *tbaa_dest, MaybeAlign align_src, Align align_dst, bool isVolatile)
{
if (x.isghost) {
// this can happen when a branch yielding a different type ends
@@ -504,14 +507,14 @@ static void emit_unbox_store(jl_codectx_t &ctx, const jl_cgval_t &x, Value *dest
auto dest_ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa_dest);
if (!x.inline_roots.empty()) {
- recombine_value(ctx, x, dest, dest_ai, alignment, isVolatile);
+ recombine_value(ctx, x, dest, dest_ai, align_dst, isVolatile);
return;
}
if (!x.ispointer()) { // already unboxed, but sometimes need conversion (e.g. f32 -> i32)
assert(x.V);
Value *unboxed = zext_struct(ctx, x.V);
- StoreInst *store = ctx.builder.CreateAlignedStore(unboxed, dest, alignment);
+ StoreInst *store = ctx.builder.CreateAlignedStore(unboxed, dest, align_dst);
store->setVolatile(isVolatile);
dest_ai.decorateInst(store);
return;
@@ -519,7 +522,7 @@ static void emit_unbox_store(jl_codectx_t &ctx, const jl_cgval_t &x, Value *dest
Value *src = data_pointer(ctx, x);
auto src_ai = jl_aliasinfo_t::fromTBAA(ctx, x.tbaa);
- emit_memcpy(ctx, dest, dest_ai, src, src_ai, jl_datatype_size(x.typ), Align(alignment), Align(julia_alignment(x.typ)), isVolatile);
+ emit_memcpy(ctx, dest, dest_ai, src, src_ai, jl_datatype_size(x.typ), Align(align_dst), align_src ? *align_src : Align(julia_alignment(x.typ)), isVolatile);
}
static jl_datatype_t *staticeval_bitstype(const jl_cgval_t &targ)
@@ -673,17 +676,23 @@ static jl_cgval_t generic_cast(
Type *to = bitstype_to_llvm((jl_value_t*)jlto, ctx.builder.getContext(), true);
Type *vt = bitstype_to_llvm(v.typ, ctx.builder.getContext(), true);
- // fptrunc fpext depend on the specific floating point format to work
- // correctly, and so do not pun their argument types.
+ // fptrunc and fpext depend on the specific floating point
+ // format to work correctly, and so do not pun their argument types.
if (!(f == fpext || f == fptrunc)) {
- if (toint)
- to = INTT(to, DL);
- else
- to = FLOATT(to);
- if (fromint)
- vt = INTT(vt, DL);
- else
- vt = FLOATT(vt);
+ // uitofp/sitofp require a specific float type argument
+ if (!(f == uitofp || f == sitofp)){
+ if (toint)
+ to = INTT(to, DL);
+ else
+ to = FLOATT(to);
+ }
+ // fptoui/fptosi require a specific float value argument
+ if (!(f == fptoui || f == fptosi)) {
+ if (fromint)
+ vt = INTT(vt, DL);
+ else
+ vt = FLOATT(vt);
+ }
}
if (!to || !vt)
@@ -1425,10 +1434,13 @@ static jl_cgval_t emit_intrinsic(jl_codectx_t &ctx, intrinsic f, jl_value_t **ar
if (!jl_is_primitivetype(xinfo.typ))
return emit_runtime_call(ctx, f, argv, nargs);
Type *xtyp = bitstype_to_llvm(xinfo.typ, ctx.builder.getContext(), true);
- if (float_func()[f])
- xtyp = FLOATT(xtyp);
- else
+ if (float_func()[f]) {
+ if (!xtyp->isFloatingPointTy())
+ return emit_runtime_call(ctx, f, argv, nargs);
+ }
+ else {
xtyp = INTT(xtyp, DL);
+ }
if (!xtyp)
return emit_runtime_call(ctx, f, argv, nargs);
////Bool are required to be in the range [0,1]
diff --git a/src/ircode.c b/src/ircode.c
index 99c5833ac3be7..ddd5bb29fdfac 100644
--- a/src/ircode.c
+++ b/src/ircode.c
@@ -547,7 +547,7 @@ static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal)
}
}
-static jl_code_info_flags_t code_info_flags(uint8_t propagate_inbounds, uint8_t has_fcall,
+static jl_code_info_flags_t code_info_flags(uint8_t propagate_inbounds, uint8_t has_fcall, uint8_t has_image_globalref,
uint8_t nospecializeinfer, uint8_t isva,
uint8_t inlining, uint8_t constprop, uint8_t nargsmatchesmethod,
jl_array_t *ssaflags)
@@ -555,6 +555,7 @@ static jl_code_info_flags_t code_info_flags(uint8_t propagate_inbounds, uint8_t
jl_code_info_flags_t flags;
flags.bits.propagate_inbounds = propagate_inbounds;
flags.bits.has_fcall = has_fcall;
+ flags.bits.has_image_globalref = has_image_globalref;
flags.bits.nospecializeinfer = nospecializeinfer;
flags.bits.isva = isva;
flags.bits.inlining = inlining;
@@ -1036,7 +1037,7 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code)
};
uint8_t nargsmatchesmethod = code->nargs == m->nargs;
- jl_code_info_flags_t flags = code_info_flags(code->propagate_inbounds, code->has_fcall,
+ jl_code_info_flags_t flags = code_info_flags(code->propagate_inbounds, code->has_fcall, code->has_image_globalref,
code->nospecializeinfer, code->isva,
code->inlining, code->constprop,
nargsmatchesmethod,
@@ -1134,6 +1135,7 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t
code->constprop = flags.bits.constprop;
code->propagate_inbounds = flags.bits.propagate_inbounds;
code->has_fcall = flags.bits.has_fcall;
+ code->has_image_globalref = flags.bits.has_image_globalref;
code->nospecializeinfer = flags.bits.nospecializeinfer;
code->isva = flags.bits.isva;
code->purity.bits = read_uint16(s.s);
@@ -1228,6 +1230,16 @@ JL_DLLEXPORT uint8_t jl_ir_flag_has_fcall(jl_string_t *data)
return flags.bits.has_fcall;
}
+JL_DLLEXPORT uint8_t jl_ir_flag_has_image_globalref(jl_string_t *data)
+{
+ if (jl_is_code_info(data))
+ return ((jl_code_info_t*)data)->has_image_globalref;
+ assert(jl_is_string(data));
+ jl_code_info_flags_t flags;
+ flags.packed = jl_string_data(data)[ir_offset_flags];
+ return flags.bits.has_image_globalref;
+}
+
JL_DLLEXPORT uint16_t jl_ir_inlining_cost(jl_string_t *data)
{
if (jl_is_code_info(data))
diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp
index 0acb7beaca9ab..4537d069e4a44 100644
--- a/src/jitlayers.cpp
+++ b/src/jitlayers.cpp
@@ -213,13 +213,84 @@ static void jl_optimize_roots(jl_codegen_params_t ¶ms, jl_method_instance_t
JL_UNLOCK(&m->writelock);
}
-void jl_jit_globals(std::map &globals) JL_NOTSAFEPOINT
+static void finish_params(Module *M, jl_codegen_params_t ¶ms, SmallVector &sharedmodules) JL_NOTSAFEPOINT
{
- for (auto &global : globals) {
- jl_link_global(global.second, global.first);
+ if (params._shared_module) {
+ sharedmodules.push_back(orc::ThreadSafeModule(std::move(params._shared_module), params.tsctx));
+ }
+
+ // In imaging mode, we can't inline global variable initializers in order to preserve
+ // the fiction that we don't know what loads from the global will return. Thus, we
+ // need to emit a separate module for the globals before any functions are compiled,
+ // to ensure that the globals are defined when they are compiled.
+ if (jl_options.image_codegen) {
+ if (!params.global_targets.empty()) {
+ void **globalslots = new void*[params.global_targets.size()];
+ void **slot = globalslots;
+ for (auto &global : params.global_targets) {
+ auto GV = global.second;
+ *slot = global.first;
+ jl_ExecutionEngine->addGlobalMapping(GV->getName(), (uintptr_t)slot);
+ slot++;
+ }
+#ifdef __clang_analyzer__
+ static void **leaker = globalslots; // for the purpose of the analyzer, we need to expressly leak this variable or it thinks we forgot to free it
+#endif
+ }
+ }
+ else {
+ StringMap NewGlobals;
+ for (auto &global : params.global_targets) {
+ NewGlobals[global.second->getName()] = global.first;
+ }
+ for (auto &GV : M->globals()) {
+ auto InitValue = NewGlobals.find(GV.getName());
+ if (InitValue != NewGlobals.end()) {
+ jl_link_global(&GV, InitValue->second);
+ }
+ }
+ }
+}
+
+extern "C" JL_DLLEXPORT_CODEGEN
+void *jl_jit_abi_converter_impl(jl_task_t *ct, void *unspecialized, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, int specsig,
+ jl_code_instance_t *codeinst, jl_callptr_t invoke, void *target, int target_specsig)
+{
+ if (codeinst == nullptr && unspecialized != nullptr)
+ return unspecialized;
+ orc::ThreadSafeModule result_m;
+ std::string gf_thunk_name;
+ {
+ jl_codegen_params_t params(std::make_unique(), jl_ExecutionEngine->getDataLayout(), jl_ExecutionEngine->getTargetTriple()); // Locks the context
+ params.getContext().setDiscardValueNames(true);
+ params.cache = true;
+ params.imaging_mode = 0;
+ result_m = jl_create_ts_module("gfthunk", params.tsctx, params.DL, params.TargetTriple);
+ Module *M = result_m.getModuleUnlocked();
+ if (target) {
+ Value *llvmtarget = literal_static_pointer_val((void*)target, PointerType::get(M->getContext(), 0));
+ gf_thunk_name = emit_abi_converter(M, params, declrt, sigt, nargs, specsig, codeinst, llvmtarget, target_specsig);
+ }
+ else if (invoke == jl_fptr_const_return_addr) {
+ gf_thunk_name = emit_abi_constreturn(M, params, declrt, sigt, nargs, specsig, codeinst->rettype_const);
+ }
+ else {
+ Value *llvminvoke = invoke ? literal_static_pointer_val((void*)invoke, PointerType::get(M->getContext(), 0)) : nullptr;
+ gf_thunk_name = emit_abi_dispatcher(M, params, declrt, sigt, nargs, specsig, codeinst, llvminvoke);
+ }
+ SmallVector sharedmodules;
+ finish_params(M, params, sharedmodules);
+ assert(sharedmodules.empty());
}
+ int8_t gc_state = jl_gc_safe_enter(ct->ptls);
+ jl_ExecutionEngine->addModule(std::move(result_m));
+ uintptr_t Addr = jl_ExecutionEngine->getFunctionAddress(gf_thunk_name);
+ jl_gc_safe_leave(ct->ptls, gc_state);
+ assert(Addr);
+ return (void*)Addr;
}
+
// lock for places where only single threaded behavior is implemented, so we need GC support
static jl_mutex_t jitlock;
// locks for adding external code to the JIT atomically
@@ -262,45 +333,6 @@ static DenseMap> incompl
// as materialization may need to acquire TSC locks.
-static void finish_params(Module *M, jl_codegen_params_t ¶ms) JL_NOTSAFEPOINT
-{
- if (params._shared_module) {
- sharedmodules.push_back(orc::ThreadSafeModule(std::move(params._shared_module), params.tsctx));
- }
-
- // In imaging mode, we can't inline global variable initializers in order to preserve
- // the fiction that we don't know what loads from the global will return. Thus, we
- // need to emit a separate module for the globals before any functions are compiled,
- // to ensure that the globals are defined when they are compiled.
- if (jl_options.image_codegen) {
- if (!params.global_targets.empty()) {
- void **globalslots = new void*[params.global_targets.size()];
- void **slot = globalslots;
- for (auto &global : params.global_targets) {
- auto GV = global.second;
- *slot = global.first;
- jl_ExecutionEngine->addGlobalMapping(GV->getName(), (uintptr_t)slot);
- slot++;
- }
-#ifdef __clang_analyzer__
- static void **leaker = globalslots; // for the purpose of the analyzer, we need to expressly leak this variable or it thinks we forgot to free it
-#endif
- }
- }
- else {
- StringMap NewGlobals;
- for (auto &global : params.global_targets) {
- NewGlobals[global.second->getName()] = global.first;
- }
- for (auto &GV : M->globals()) {
- auto InitValue = NewGlobals.find(GV.getName());
- if (InitValue != NewGlobals.end()) {
- jl_link_global(&GV, InitValue->second);
- }
- }
- }
-}
-
static int jl_analyze_workqueue(jl_code_instance_t *callee, jl_codegen_params_t ¶ms, bool forceall=false) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER
{
jl_task_t *ct = jl_current_task;
@@ -351,8 +383,8 @@ static int jl_analyze_workqueue(jl_code_instance_t *callee, jl_codegen_params_t
}
if (preal_decl.empty()) {
// there may be an equivalent method already compiled (or at least registered with the JIT to compile), in which case we should be using that instead
- jl_code_instance_t *compiled_ci = jl_get_ci_equiv(codeinst, 1);
- if ((jl_value_t*)compiled_ci != jl_nothing) {
+ jl_code_instance_t *compiled_ci = jl_get_ci_equiv(codeinst, 0);
+ if (compiled_ci != codeinst) {
codeinst = compiled_ci;
uint8_t specsigflags;
void *fptr;
@@ -516,7 +548,7 @@ static void prepare_compile(jl_code_instance_t *codeinst) JL_NOTSAFEPOINT_LEAVE
waiting = jl_analyze_workqueue(codeinst, params, true); // may safepoint
assert(!waiting); (void)waiting;
Module *M = emittedmodules[codeinst].getModuleUnlocked();
- finish_params(M, params);
+ finish_params(M, params, sharedmodules);
incompletemodules.erase(it);
}
// and then indicate this should be compiled now
@@ -548,7 +580,7 @@ static void complete_emit(jl_code_instance_t *edge) JL_NOTSAFEPOINT_LEAVE JL_NOT
int waiting = jl_analyze_workqueue(callee, params); // may safepoint
assert(!waiting); (void)waiting;
Module *M = emittedmodules[callee].getModuleUnlocked();
- finish_params(M, params);
+ finish_params(M, params, sharedmodules);
incompletemodules.erase(it);
}
}
@@ -595,15 +627,18 @@ static void jl_compile_codeinst_now(jl_code_instance_t *codeinst)
// If logging of the compilation stream is enabled,
// then dump the method-instance specialization type to the stream
jl_method_instance_t *mi = jl_get_ci_mi(codeinst);
+ uint64_t end_time = jl_hrtime();
if (jl_is_method(mi->def.method)) {
auto stream = *jl_ExecutionEngine->get_dump_compiles_stream();
if (stream) {
- uint64_t end_time = jl_hrtime();
ios_printf(stream, "%" PRIu64 "\t\"", end_time - start_time);
jl_static_show((JL_STREAM*)stream, mi->specTypes);
ios_printf(stream, "\"\n");
}
}
+ jl_atomic_store_relaxed(&codeinst->time_compile,
+ julia_double_to_half(julia_half_to_float(jl_atomic_load_relaxed(&codeinst->time_compile))
+ + (end_time - start_time) * 1e-9));
lock.native.lock();
}
else {
@@ -764,146 +799,11 @@ void jl_emit_codeinst_to_jit_impl(
incompletemodules.try_emplace(codeinst, std::move(params), waiting);
}
else {
- finish_params(result_m.getModuleUnlocked(), params);
+ finish_params(result_m.getModuleUnlocked(), params, sharedmodules);
}
emittedmodules[codeinst] = std::move(result_m);
}
-
-const char *jl_generate_ccallable(Module *llvmmod, void *sysimg_handle, jl_value_t *declrt, jl_value_t *sigt, jl_codegen_params_t ¶ms);
-
-// compile a C-callable alias
-extern "C" JL_DLLEXPORT_CODEGEN
-int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *sysimg, jl_value_t *declrt, jl_value_t *sigt)
-{
- auto ct = jl_current_task;
- bool timed = (ct->reentrant_timing & 1) == 0;
- if (timed)
- ct->reentrant_timing |= 1;
- uint64_t compiler_start_time = 0;
- uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed(&jl_measure_compile_time_enabled);
- if (measure_compile_time_enabled)
- compiler_start_time = jl_hrtime();
- jl_codegen_params_t *pparams = (jl_codegen_params_t*)p;
- DataLayout DL = pparams ? pparams->DL : jl_ExecutionEngine->getDataLayout();
- Triple TargetTriple = pparams ? pparams->TargetTriple : jl_ExecutionEngine->getTargetTriple();
- orc::ThreadSafeContext ctx;
- auto into = unwrap(llvmmod);
- orc::ThreadSafeModule backing;
- bool success = true;
- const char *name = "";
- if (into == NULL) {
- ctx = pparams ? pparams->tsctx : jl_ExecutionEngine->makeContext();
- backing = jl_create_ts_module("cextern", ctx, DL, TargetTriple);
- into = &backing;
- }
- { // params scope
- jl_codegen_params_t params(into->getContext(), DL, TargetTriple);
- if (pparams == NULL) {
- params.cache = p == NULL;
- params.imaging_mode = 0;
- params.tsctx.getContext()->setDiscardValueNames(true);
- pparams = ¶ms;
- }
- Module &M = *into->getModuleUnlocked();
- assert(pparams->tsctx.getContext() == &M.getContext());
- name = jl_generate_ccallable(&M, sysimg, declrt, sigt, *pparams);
- if (!sysimg && !p) {
- { // drop lock to keep analyzer happy (since it doesn't know we have the only reference to it)
- auto release = std::move(params.tsctx_lock);
- }
- { // lock scope
- jl_unique_gcsafe_lock lock(extern_c_lock);
- if (jl_ExecutionEngine->getGlobalValueAddress(name))
- success = false;
- }
- params.tsctx_lock = params.tsctx.getLock(); // re-acquire lock
- if (success && params.cache) {
- size_t newest_world = jl_atomic_load_acquire(&jl_world_counter);
- for (auto &it : params.workqueue) { // really just zero or one, and just the ABI not the rest of the metadata
- jl_code_instance_t *codeinst = it.first;
- JL_GC_PROMISE_ROOTED(codeinst);
- jl_code_instance_t *newest_ci = jl_type_infer(jl_get_ci_mi(codeinst), newest_world, SOURCE_MODE_ABI);
- if (newest_ci) {
- if (jl_egal(codeinst->rettype, newest_ci->rettype))
- it.first = codeinst;
- jl_compile_codeinst_now(newest_ci);
- }
- }
- jl_analyze_workqueue(nullptr, params, true);
- assert(params.workqueue.empty());
- finish_params(&M, params);
- }
- }
- pparams = nullptr;
- }
- if (!sysimg && success && llvmmod == NULL) {
- { // lock scope
- jl_unique_gcsafe_lock lock(extern_c_lock);
- if (!jl_ExecutionEngine->getGlobalValueAddress(name)) {
- {
- auto Lock = backing.getContext().getLock();
- jl_ExecutionEngine->optimizeDLSyms(*backing.getModuleUnlocked()); // safepoint
- }
- jl_ExecutionEngine->addModule(std::move(backing));
- success = jl_ExecutionEngine->getGlobalValueAddress(name);
- assert(success);
- }
- }
- }
- if (timed) {
- if (measure_compile_time_enabled) {
- auto end = jl_hrtime();
- jl_atomic_fetch_add_relaxed(&jl_cumulative_compile_time, end - compiler_start_time);
- }
- ct->reentrant_timing &= ~1ull;
- }
- return success;
-}
-
-// declare a C-callable entry point; called during code loading from the toplevel
-extern "C" JL_DLLEXPORT_CODEGEN
-void jl_extern_c_impl(jl_value_t *declrt, jl_tupletype_t *sigt)
-{
- // validate arguments. try to do as many checks as possible here to avoid
- // throwing errors later during codegen.
- JL_TYPECHK(@ccallable, type, declrt);
- if (!jl_is_tuple_type(sigt))
- jl_type_error("@ccallable", (jl_value_t*)jl_anytuple_type_type, (jl_value_t*)sigt);
- // check that f is a guaranteed singleton type
- jl_datatype_t *ft = (jl_datatype_t*)jl_tparam0(sigt);
- if (!jl_is_datatype(ft) || !jl_is_datatype_singleton(ft))
- jl_error("@ccallable: function object must be a singleton");
-
- // compute / validate return type
- if (!jl_is_concrete_type(declrt) || jl_is_kind(declrt))
- jl_error("@ccallable: return type must be concrete and correspond to a C type");
- if (!jl_type_mappable_to_c(declrt))
- jl_error("@ccallable: return type doesn't correspond to a C type");
-
- // validate method signature
- size_t i, nargs = jl_nparams(sigt);
- for (i = 1; i < nargs; i++) {
- jl_value_t *ati = jl_tparam(sigt, i);
- if (!jl_is_concrete_type(ati) || jl_is_kind(ati) || !jl_type_mappable_to_c(ati))
- jl_error("@ccallable: argument types must be concrete");
- }
-
- // save a record of this so that the alias is generated when we write an object file
- jl_method_t *meth = (jl_method_t*)jl_methtable_lookup(ft->name->mt, (jl_value_t*)sigt, jl_atomic_load_acquire(&jl_world_counter));
- if (!jl_is_method(meth))
- jl_error("@ccallable: could not find requested method");
- JL_GC_PUSH1(&meth);
- meth->ccallable = jl_svec2(declrt, (jl_value_t*)sigt);
- jl_gc_wb(meth, meth->ccallable);
- JL_GC_POP();
-
- // create the alias in the current runtime environment
- int success = jl_compile_extern_c(NULL, NULL, NULL, declrt, (jl_value_t*)sigt);
- if (!success)
- jl_error("@ccallable was already defined for this method name");
-}
-
extern "C" JL_DLLEXPORT_CODEGEN
int jl_compile_codeinst_impl(jl_code_instance_t *ci)
{
@@ -1381,7 +1281,7 @@ namespace {
#endif
#endif
uint32_t target_flags = 0;
- auto target = jl_get_llvm_target(jl_generating_output(), target_flags);
+ auto target = jl_get_llvm_target(jl_options.cpu_target, jl_generating_output(), target_flags);
auto &TheCPU = target.first;
SmallVector targetFeatures(target.second.begin(), target.second.end());
std::string errorstr;
@@ -2122,6 +2022,13 @@ void JuliaOJIT::addModule(orc::ThreadSafeModule TSM)
TSM = (*JITPointers)(std::move(TSM));
auto Lock = TSM.getContext().getLock();
Module &M = *TSM.getModuleUnlocked();
+
+ for (auto &f : M) {
+ if (!f.isDeclaration()){
+ jl_timing_puts(JL_TIMING_DEFAULT_BLOCK, f.getName().str().c_str());
+ }
+ }
+
// Treat this as if one of the passes might contain a safepoint
// even though that shouldn't be the case and might be unwise
Expected> Obj = CompileLayer.getCompiler()(M);
diff --git a/src/jitlayers.h b/src/jitlayers.h
index 4637670ec588c..b411febd792b8 100644
--- a/src/jitlayers.h
+++ b/src/jitlayers.h
@@ -212,6 +212,16 @@ struct jl_codegen_call_target_t {
bool specsig;
};
+// reification of a call to jl_jit_abi_convert, so that it isn't necessary to parse the Modules to recover this info
+struct cfunc_decl_t {
+ jl_value_t *declrt;
+ jl_value_t *sigt;
+ size_t nargs;
+ bool specsig;
+ llvm::GlobalVariable *theFptr;
+ llvm::GlobalVariable *cfuncdata;
+};
+
typedef SmallVector, 0> jl_workqueue_t;
typedef std::list> CallFrames;
@@ -227,13 +237,13 @@ struct jl_codegen_params_t {
typedef StringMap SymMapGV;
// outputs
jl_workqueue_t workqueue;
+ SmallVector cfuncs;
std::map global_targets;
jl_array_t *temporary_roots = nullptr;
std::map, GlobalVariable*> external_fns;
std::map ditypes;
std::map llvmtypes;
DenseMap mergedConstants;
- llvm::MapVector> enqueuers;
// Map from symbol name (in a certain library) to its GV in sysimg and the
// DL handle address in the current session.
StringMap> libMapGV;
@@ -274,6 +284,8 @@ struct jl_codegen_params_t {
~jl_codegen_params_t() JL_NOTSAFEPOINT JL_NOTSAFEPOINT_LEAVE = default;
};
+const char *jl_generate_ccallable(Module *llvmmod, jl_value_t *nameval, jl_value_t *declrt, jl_value_t *sigt, jl_codegen_params_t ¶ms);
+
jl_llvm_functions_t jl_emit_code(
orc::ThreadSafeModule &M,
jl_method_instance_t *mi,
@@ -288,6 +300,11 @@ jl_llvm_functions_t jl_emit_codeinst(
jl_code_info_t *src,
jl_codegen_params_t ¶ms);
+jl_llvm_functions_t jl_emit_codedecls(
+ orc::ThreadSafeModule &M,
+ jl_code_instance_t *codeinst,
+ jl_codegen_params_t ¶ms);
+
enum CompilationPolicy {
Default = 0,
Extern = 1,
@@ -296,6 +313,13 @@ enum CompilationPolicy {
Function *jl_cfunction_object(jl_function_t *f, jl_value_t *rt, jl_tupletype_t *argt,
jl_codegen_params_t ¶ms);
+extern "C" JL_DLLEXPORT_CODEGEN
+void *jl_jit_abi_convert(jl_task_t *ct, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, bool specsig, _Atomic(void*) *fptr, _Atomic(size_t) *last_world, void *data);
+std::string emit_abi_dispatcher(Module *M, jl_codegen_params_t ¶ms, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, bool specsig, jl_code_instance_t *codeinst, Value *invoke);
+std::string emit_abi_converter(Module *M, jl_codegen_params_t ¶ms, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, bool specsig, jl_code_instance_t *codeinst, Value *target, bool target_specsig);
+std::string emit_abi_constreturn(Module *M, jl_codegen_params_t ¶ms, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, bool specsig, jl_value_t *rettype_const);
+std::string emit_abi_constreturn(Module *M, jl_codegen_params_t ¶ms, bool specsig, jl_code_instance_t *codeinst);
+
Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptrName, Module *M, jl_codegen_params_t ¶ms) JL_NOTSAFEPOINT;
void emit_specsig_to_fptr1(
Function *gf_thunk, jl_returninfo_t::CallingConv cc, unsigned return_roots,
@@ -308,6 +332,8 @@ void jl_init_function(Function *F, const Triple &TT) JL_NOTSAFEPOINT;
void add_named_global(StringRef name, void *addr) JL_NOTSAFEPOINT;
+Constant *literal_pointer_val_slot(jl_codegen_params_t ¶ms, Module *M, jl_value_t *p);
+
static inline Constant *literal_static_pointer_val(const void *p, Type *T) JL_NOTSAFEPOINT
{
// this function will emit a static pointer into the generated code
diff --git a/src/jl_exported_data.inc b/src/jl_exported_data.inc
index 62acce6ce1d65..df3b9c121837c 100644
--- a/src/jl_exported_data.inc
+++ b/src/jl_exported_data.inc
@@ -2,6 +2,7 @@
// Pointers that are exposed through the public libjulia
#define JL_EXPORTED_DATA_POINTERS(XX) \
+ XX(jl_abioverride_type) \
XX(jl_abstractarray_type) \
XX(jl_abstractstring_type) \
XX(jl_addrspace_type) \
@@ -65,6 +66,8 @@
XX(jl_interrupt_exception) \
XX(jl_intrinsic_type) \
XX(jl_kwcall_func) \
+ XX(jl_libdl_module) \
+ XX(jl_libdl_dlopen_func) \
XX(jl_lineinfonode_type) \
XX(jl_linenumbernode_type) \
XX(jl_llvmpointer_type) \
@@ -108,6 +111,7 @@
XX(jl_pinode_type) \
XX(jl_pointer_type) \
XX(jl_pointer_typename) \
+ XX(jl_precompilable_error) \
XX(jl_quotenode_type) \
XX(jl_readonlymemory_exception) \
XX(jl_ref_type) \
@@ -116,12 +120,12 @@
XX(jl_simplevector_type) \
XX(jl_slotnumber_type) \
XX(jl_ssavalue_type) \
- XX(jl_abioverride_type) \
XX(jl_stackovf_exception) \
XX(jl_string_type) \
XX(jl_symbol_type) \
XX(jl_task_type) \
XX(jl_top_module) \
+ XX(jl_trimfailure_type) \
XX(jl_true) \
XX(jl_tuple_typename) \
XX(jl_tvar_type) \
@@ -149,9 +153,6 @@
XX(jl_voidpointer_type) \
XX(jl_void_type) \
XX(jl_weakref_type) \
- XX(jl_libdl_module) \
- XX(jl_libdl_dlopen_func) \
- XX(jl_precompilable_error) \
// Data symbols that are defined inside the public libjulia
#define JL_EXPORTED_DATA_SYMBOLS(XX) \
diff --git a/src/jl_exported_funcs.inc b/src/jl_exported_funcs.inc
index 4d1ab94644e39..60a5256af2b58 100644
--- a/src/jl_exported_funcs.inc
+++ b/src/jl_exported_funcs.inc
@@ -2,7 +2,6 @@
#define JL_RUNTIME_EXPORTED_FUNCS(XX) \
XX(jl_active_task_stack) \
- XX(jl_add_standard_imports) \
XX(jl_adopt_thread) \
XX(jl_alignment) \
XX(jl_alloc_array_1d) \
@@ -40,9 +39,9 @@
XX(jl_atomic_store_bits) \
XX(jl_atomic_storeonce_bits) \
XX(jl_atomic_swap_bits) \
+ XX(jl_autoinit_and_adopt_thread) \
XX(jl_backtrace_from_here) \
XX(jl_base_relative_to) \
- XX(jl_binding_resolved_p) \
XX(jl_bitcast) \
XX(jl_boundp) \
XX(jl_bounds_error) \
@@ -72,6 +71,7 @@
XX(jl_call1) \
XX(jl_call2) \
XX(jl_call3) \
+ XX(jl_call4) \
XX(jl_calloc) \
XX(jl_call_in_typeinf_world) \
XX(jl_capture_interp_frame) \
@@ -97,7 +97,6 @@
XX(jl_cstr_to_string) \
XX(jl_current_exception) \
XX(jl_debug_method_invalidation) \
- XX(jl_defines_or_exports_p) \
XX(jl_deprecate_binding) \
XX(jl_dlclose) \
XX(jl_dlopen) \
@@ -192,8 +191,6 @@
XX(jl_get_ARCH) \
XX(jl_get_backtrace) \
XX(jl_get_binding) \
- XX(jl_get_binding_for_method_def) \
- XX(jl_get_binding_or_error) \
XX(jl_get_binding_wr) \
XX(jl_check_binding_currently_writable) \
XX(jl_get_cpu_name) \
@@ -213,6 +210,8 @@
XX(jl_get_module_infer) \
XX(jl_get_module_of_binding) \
XX(jl_get_module_optlevel) \
+ XX(jl_get_module_usings_backedges) \
+ XX(jl_get_module_binding_or_nothing) \
XX(jl_get_next_task) \
XX(jl_get_nth_field) \
XX(jl_get_nth_field_checked) \
@@ -241,11 +240,11 @@
XX(jl_hrtime) \
XX(jl_idtable_rehash) \
XX(jl_init) \
+ XX(jl_init_) \
XX(jl_init_options) \
XX(jl_init_restored_module) \
- XX(jl_init_with_image) \
- XX(jl_init_with_image__threading) \
- XX(jl_init__threading) \
+ XX(jl_init_with_image_file) \
+ XX(jl_init_with_image_handle) \
XX(jl_install_sigint_handler) \
XX(jl_instantiate_type_in_env) \
XX(jl_instantiate_unionall) \
@@ -316,7 +315,6 @@
XX(jl_module_names) \
XX(jl_module_parent) \
XX(jl_module_getloc) \
- XX(jl_module_public) \
XX(jl_module_public_p) \
XX(jl_module_use) \
XX(jl_module_using) \
@@ -363,7 +361,7 @@
XX(jl_pointerset) \
XX(jl_pop_handler) \
XX(jl_pop_handler_noexcept) \
- XX(jl_preload_sysimg_so) \
+ XX(jl_preload_sysimg) \
XX(jl_prepend_cwd) \
XX(jl_printf) \
XX(jl_print_backtrace) \
@@ -393,7 +391,6 @@
XX(jl_restore_incremental) \
XX(jl_restore_package_image_from_file) \
XX(jl_restore_system_image) \
- XX(jl_restore_system_image_data) \
XX(jl_rethrow) \
XX(jl_rethrow_other) \
XX(jl_running_on_valgrind) \
@@ -516,7 +513,6 @@
YY(jl_dump_function_ir) \
YY(jl_dump_method_asm) \
YY(jl_emit_codeinst_to_jit) \
- YY(jl_extern_c) \
YY(jl_get_llvmf_defn) \
YY(jl_get_llvm_function) \
YY(jl_get_llvm_module) \
@@ -533,7 +529,6 @@
YY(jl_register_fptrs) \
YY(jl_generate_fptr_for_unspecialized) \
YY(jl_compile_codeinst) \
- YY(jl_compile_extern_c) \
YY(jl_teardown_codegen) \
YY(jl_jit_total_bytes) \
YY(jl_create_native) \
@@ -547,6 +542,7 @@
YY(jl_getUnwindInfo) \
YY(jl_get_libllvm) \
YY(jl_register_passbuilder_callbacks) \
+ YY(jl_jit_abi_converter) \
YY(JLJITGetLLVMOrcExecutionSession) \
YY(JLJITGetJuliaOJIT) \
YY(JLJITGetExternalJITDylib) \
diff --git a/src/jl_uv.c b/src/jl_uv.c
index 3498952622dce..cb9c7c5e912fc 100644
--- a/src/jl_uv.c
+++ b/src/jl_uv.c
@@ -15,6 +15,7 @@
#include "errno.h"
#include
#include
+#include
#endif
#include "julia.h"
@@ -812,6 +813,83 @@ JL_DLLEXPORT int jl_printf(uv_stream_t *s, const char *format, ...)
return c;
}
+STATIC_INLINE int copystp(char *dest, const char *src)
+{
+ char *d = stpcpy(dest, src);
+ return (int)(d - dest);
+}
+
+// RAI-specific
+STATIC_INLINE void write_to_safe_crash_log(char *buf) JL_NOTSAFEPOINT
+{
+ int buflen = strlen(buf);
+ // Our telemetry on SPCS expects a JSON object per line.
+ // We ignore write failures because there is nothing we can do.
+ // We'll use a 2K byte buffer: reserving 69 bytes for JSON message
+ // decorations, 1 byte for the terminating NUL character, and 3 bytes
+ // for an ellipsis (in case we have to truncate the message) leaves
+ // `max_b` bytes for the message.
+ const int wbuflen = 2048;
+ const int max_b = wbuflen - 70 - 3;
+ char wbuf[wbuflen];
+ bzero(wbuf, wbuflen);
+ int wlen = 0;
+
+ // JSON preamble (32 bytes)
+ wlen += copystp(&wbuf[wlen], "\n{\"level\":\"Error\", \"timestamp\":\"");
+
+ // Timestamp (19 bytes)
+ struct timeval tv;
+ struct tm* tm_info;
+ gettimeofday(&tv, NULL);
+ tm_info = gmtime(&tv.tv_sec);
+ wlen += strftime(&wbuf[wlen], 42, "%Y-%m-%dT%H:%M:%S", tm_info);
+ sprintf(&wbuf[wlen], ".%03ld", (long)tv.tv_usec / 1000);
+ wlen += 4;
+
+ // JSON preamble to message (15 bytes)
+ wlen += copystp(&wbuf[wlen], "\", \"message\": \"");
+
+ // Message
+ // Each iteration will advance wlen by 1 or 2
+ for (size_t i = 0; i < buflen; i++) {
+ // Truncate the message if the write buffer is full
+ if (wlen == max_b || wlen == max_b - 1) {
+ wlen += copystp(&wbuf[wlen], "...");
+ break;
+ }
+ switch (buf[i]) {
+ case '"':
+ wlen += copystp(&wbuf[wlen], "\\\"");
+ break;
+ case '\b':
+ wlen += copystp(&wbuf[wlen], "\\b");
+ break;
+ case '\n':
+ wlen += copystp(&wbuf[wlen], "\\n");
+ break;
+ case '\r':
+ wlen += copystp(&wbuf[wlen], "\\r");
+ break;
+ case '\t':
+ wlen += copystp(&wbuf[wlen], "\\t");
+ break;
+ case '\\':
+ wlen += copystp(&wbuf[wlen], "\\\\");
+ break;
+ default:
+ wbuf[wlen++] = buf[i];
+ break;
+ }
+ }
+ // JSON completion (3 bytes)
+ wlen += copystp(&wbuf[wlen], "\"}\n");
+ write(jl_sig_fd, wbuf, wlen);
+ fdatasync(jl_sig_fd);
+}
+
+extern int jl_inside_heartbeat_thread(void);
+
JL_DLLEXPORT void jl_safe_printf(const char *fmt, ...)
{
static char buf[1000];
@@ -828,6 +906,12 @@ JL_DLLEXPORT void jl_safe_printf(const char *fmt, ...)
va_end(args);
buf[999] = '\0';
+ // order is important here: we want to ensure that the threading infra
+ // has been initialized before we start trying to print to the
+ // safe crash log file
+ if (jl_sig_fd != 0 && (jl_inside_signal_handler() || jl_inside_heartbeat_thread())) {
+ write_to_safe_crash_log(buf);
+ }
if (write(STDERR_FILENO, buf, strlen(buf)) < 0) {
// nothing we can do; ignore the failure
}
diff --git a/src/jlapi.c b/src/jlapi.c
index b8fbda801f43b..47d5b84fa5606 100644
--- a/src/jlapi.c
+++ b/src/jlapi.c
@@ -26,11 +26,13 @@ extern "C" {
#include
#endif
+static void jl_resolve_sysimg_location(JL_IMAGE_SEARCH rel, const char* julia_bindir);
+
/**
* @brief Check if Julia is already initialized.
*
- * Determine if Julia has been previously initialized
- * via `jl_init` or `jl_init_with_image`.
+ * Determine if Julia has been previously initialized via `jl_init` or
+ * `jl_init_with_image_file` or `jl_init_with_image_handle`.
*
* @return Returns 1 if Julia is initialized, 0 otherwise.
*/
@@ -68,6 +70,20 @@ JL_DLLEXPORT void jl_set_ARGS(int argc, char **argv)
}
}
+JL_DLLEXPORT void jl_init_with_image_handle(void *handle) {
+ if (jl_is_initialized())
+ return;
+
+ const char *image_path = jl_pathname_for_handle(handle);
+ jl_options.image_file = image_path;
+
+ jl_resolve_sysimg_location(JL_IMAGE_JULIA_HOME, NULL);
+ jl_image_buf_t sysimage = jl_set_sysimg_so(handle);
+
+ jl_init_(sysimage);
+
+ jl_exception_clear();
+}
/**
* @brief Initialize Julia with a specified system image file.
*
@@ -82,18 +98,21 @@ JL_DLLEXPORT void jl_set_ARGS(int argc, char **argv)
* @param image_path The path of a system image file (*.so). Interpreted as relative to julia_bindir
* or the default Julia home directory if not an absolute path.
*/
-JL_DLLEXPORT void jl_init_with_image(const char *julia_bindir,
- const char *image_path)
+JL_DLLEXPORT void jl_init_with_image_file(const char *julia_bindir,
+ const char *image_path)
{
if (jl_is_initialized())
return;
- libsupport_init();
- jl_options.julia_bindir = julia_bindir;
if (image_path != NULL)
jl_options.image_file = image_path;
else
jl_options.image_file = jl_get_default_sysimg_path();
- julia_init(JL_IMAGE_JULIA_HOME);
+
+ jl_resolve_sysimg_location(JL_IMAGE_JULIA_HOME, julia_bindir);
+ jl_image_buf_t sysimage = jl_preload_sysimg(jl_options.image_file);
+
+ jl_init_(sysimage);
+
jl_exception_clear();
}
@@ -105,31 +124,7 @@ JL_DLLEXPORT void jl_init_with_image(const char *julia_bindir,
*/
JL_DLLEXPORT void jl_init(void)
{
- char *libbindir = NULL;
-#ifdef _OS_WINDOWS_
- libbindir = strdup(jl_get_libdir());
-#else
- (void)asprintf(&libbindir, "%s" PATHSEPSTRING ".." PATHSEPSTRING "%s", jl_get_libdir(), "bin");
-#endif
- if (!libbindir) {
- printf("jl_init unable to find libjulia!\n");
- abort();
- }
- jl_init_with_image(libbindir, jl_get_default_sysimg_path());
- free(libbindir);
-}
-
-// HACK: remove this for Julia 1.8 (see )
-JL_DLLEXPORT void jl_init__threading(void)
-{
- jl_init();
-}
-
-// HACK: remove this for Julia 1.8 (see )
-JL_DLLEXPORT void jl_init_with_image__threading(const char *julia_bindir,
- const char *image_relative_path)
-{
- jl_init_with_image(julia_bindir, image_relative_path);
+ jl_init_with_image_file(NULL, jl_get_default_sysimg_path());
}
static void _jl_exception_clear(jl_task_t *ct) JL_NOTSAFEPOINT
@@ -437,6 +432,46 @@ JL_DLLEXPORT jl_value_t *jl_call3(jl_function_t *f, jl_value_t *a,
return v;
}
+/**
+ * @brief Call a Julia function with four arguments.
+ *
+ * A specialized case of `jl_call` for simpler scenarios.
+ *
+ * @param f A pointer to `jl_function_t` representing the Julia function to call.
+ * @param a A pointer to `jl_value_t` representing the first argument.
+ * @param b A pointer to `jl_value_t` representing the second argument.
+ * @param c A pointer to `jl_value_t` representing the third argument.
+ * @param d A pointer to `jl_value_t` representing the fourth argument.
+ * @return A pointer to `jl_value_t` representing the result of the function call.
+ */
+JL_DLLEXPORT jl_value_t *jl_call4(jl_function_t *f, jl_value_t *a,
+ jl_value_t *b, jl_value_t *c,
+ jl_value_t *d)
+{
+ jl_value_t *v;
+ jl_task_t *ct = jl_current_task;
+ JL_TRY {
+ jl_value_t **argv;
+ JL_GC_PUSHARGS(argv, 5);
+ argv[0] = f;
+ argv[1] = a;
+ argv[2] = b;
+ argv[3] = c;
+ argv[4] = d;
+ size_t last_age = ct->world_age;
+ ct->world_age = jl_get_world_counter();
+ v = jl_apply(argv, 5);
+ ct->world_age = last_age;
+ JL_GC_POP();
+ _jl_exception_clear(ct);
+ }
+ JL_CATCH {
+ ct->ptls->previous_exception = jl_current_exception(ct);
+ v = NULL;
+ }
+ return v;
+}
+
/**
* @brief Get a field from a Julia object.
*
@@ -1075,7 +1110,14 @@ JL_DLLEXPORT int jl_repl_entrypoint(int argc, char *argv[])
jl_error("Failed to self-execute");
}
- julia_init(jl_options.image_file_specified ? JL_IMAGE_CWD : JL_IMAGE_JULIA_HOME);
+ JL_IMAGE_SEARCH rel = jl_options.image_file_specified ? JL_IMAGE_CWD : JL_IMAGE_JULIA_HOME;
+ jl_resolve_sysimg_location(rel, NULL);
+ jl_image_buf_t sysimage = { JL_IMAGE_KIND_NONE };
+ if (jl_options.image_file)
+ sysimage = jl_preload_sysimg(jl_options.image_file);
+
+ jl_init_(sysimage);
+
if (lisp_prompt) {
jl_current_task->world_age = jl_get_world_counter();
jl_lisp_prompt();
@@ -1086,6 +1128,180 @@ JL_DLLEXPORT int jl_repl_entrypoint(int argc, char *argv[])
return ret;
}
+// create an absolute-path copy of the input path format string
+// formed as `joinpath(replace(pwd(), "%" => "%%"), in)`
+// unless `in` starts with `%`
+static const char *absformat(const char *in)
+{
+ if (in[0] == '%' || jl_isabspath(in))
+ return in;
+ // get an escaped copy of cwd
+ size_t path_size = JL_PATH_MAX;
+ char path[JL_PATH_MAX];
+ if (uv_cwd(path, &path_size)) {
+ jl_error("fatal error: unexpected error while retrieving current working directory");
+ }
+ size_t sz = strlen(in) + 1;
+ size_t i, fmt_size = 0;
+ for (i = 0; i < path_size; i++)
+ fmt_size += (path[i] == '%' ? 2 : 1);
+ char *out = (char*)malloc_s(fmt_size + 1 + sz);
+ fmt_size = 0;
+ for (i = 0; i < path_size; i++) { // copy-replace pwd portion
+ char c = path[i];
+ out[fmt_size++] = c;
+ if (c == '%')
+ out[fmt_size++] = '%';
+ }
+ out[fmt_size++] = PATHSEPSTRING[0]; // path sep
+ memcpy(out + fmt_size, in, sz); // copy over format, including nul
+ return out;
+}
+
+static char *absrealpath(const char *in, int nprefix)
+{ // compute an absolute realpath location, so that chdir doesn't change the file reference
+ // ignores (copies directly over) nprefix characters at the start of abspath
+#ifndef _OS_WINDOWS_
+ char *out = realpath(in + nprefix, NULL);
+ if (out) {
+ if (nprefix > 0) {
+ size_t sz = strlen(out) + 1;
+ char *cpy = (char*)malloc_s(sz + nprefix);
+ memcpy(cpy, in, nprefix);
+ memcpy(cpy + nprefix, out, sz);
+ free(out);
+ out = cpy;
+ }
+ }
+ else {
+ size_t sz = strlen(in + nprefix) + 1;
+ if (in[nprefix] == PATHSEPSTRING[0]) {
+ out = (char*)malloc_s(sz + nprefix);
+ memcpy(out, in, sz + nprefix);
+ }
+ else {
+ size_t path_size = JL_PATH_MAX;
+ char *path = (char*)malloc_s(JL_PATH_MAX);
+ if (uv_cwd(path, &path_size)) {
+ jl_error("fatal error: unexpected error while retrieving current working directory");
+ }
+ out = (char*)malloc_s(path_size + 1 + sz + nprefix);
+ memcpy(out, in, nprefix);
+ memcpy(out + nprefix, path, path_size);
+ out[nprefix + path_size] = PATHSEPSTRING[0];
+ memcpy(out + nprefix + path_size + 1, in + nprefix, sz);
+ free(path);
+ }
+ }
+#else
+ // GetFullPathName intentionally errors if given an empty string so manually insert `.` to invoke cwd
+ char *in2 = (char*)malloc_s(JL_PATH_MAX);
+ if (strlen(in) - nprefix == 0) {
+ memcpy(in2, in, nprefix);
+ in2[nprefix] = '.';
+ in2[nprefix+1] = '\0';
+ in = in2;
+ }
+ DWORD n = GetFullPathName(in + nprefix, 0, NULL, NULL);
+ if (n <= 0) {
+ jl_error("fatal error: jl_options.image_file path too long or GetFullPathName failed");
+ }
+ char *out = (char*)malloc_s(n + nprefix);
+ DWORD m = GetFullPathName(in + nprefix, n, out + nprefix, NULL);
+ if (n != m + 1) {
+ jl_error("fatal error: jl_options.image_file path too long or GetFullPathName failed");
+ }
+ memcpy(out, in, nprefix);
+ free(in2);
+#endif
+ return out;
+}
+
+static void jl_resolve_sysimg_location(JL_IMAGE_SEARCH rel, const char* julia_bindir)
+{
+ libsupport_init();
+ jl_init_timing();
+
+ // this function resolves the paths in jl_options to absolute file locations as needed
+ // and it replaces the pointers to `julia_bindir`, `julia_bin`, `image_file`, and output file paths
+ // it may fail, print an error, and exit(1) if any of these paths are longer than JL_PATH_MAX
+ //
+ // note: if you care about lost memory, you should call the appropriate `free()` function
+ // on the original pointer for each `char*` you've inserted into `jl_options`, after
+ // calling `jl_init_()`
+ char *free_path = (char*)malloc_s(JL_PATH_MAX);
+ size_t path_size = JL_PATH_MAX;
+ if (uv_exepath(free_path, &path_size)) {
+ jl_error("fatal error: unexpected error while retrieving exepath");
+ }
+ if (path_size >= JL_PATH_MAX) {
+ jl_error("fatal error: jl_options.julia_bin path too long");
+ }
+ jl_options.julia_bin = (char*)malloc_s(path_size + 1);
+ memcpy((char*)jl_options.julia_bin, free_path, path_size);
+ ((char*)jl_options.julia_bin)[path_size] = '\0';
+ if (julia_bindir == NULL) {
+ jl_options.julia_bindir = getenv("JULIA_BINDIR");
+ if (!jl_options.julia_bindir) {
+#ifdef _OS_WINDOWS_
+ jl_options.julia_bindir = strdup(jl_get_libdir());
+#else
+ int written = asprintf((char**)&jl_options.julia_bindir, "%s" PATHSEPSTRING ".." PATHSEPSTRING "%s", jl_get_libdir(), "bin");
+ if (written < 0)
+ abort(); // unexpected: memory allocation failed
+#endif
+ }
+ } else {
+ jl_options.julia_bindir = julia_bindir;
+ }
+ if (jl_options.julia_bindir)
+ jl_options.julia_bindir = absrealpath(jl_options.julia_bindir, 0);
+ free(free_path);
+ free_path = NULL;
+ if (jl_options.image_file) {
+ if (rel == JL_IMAGE_JULIA_HOME && !jl_isabspath(jl_options.image_file)) {
+ // build time path, relative to JULIA_BINDIR
+ free_path = (char*)malloc_s(JL_PATH_MAX);
+ int n = snprintf(free_path, JL_PATH_MAX, "%s" PATHSEPSTRING "%s",
+ jl_options.julia_bindir, jl_options.image_file);
+ if (n >= JL_PATH_MAX || n < 0) {
+ jl_error("fatal error: jl_options.image_file path too long");
+ }
+ jl_options.image_file = free_path;
+ }
+ if (jl_options.image_file)
+ jl_options.image_file = absrealpath(jl_options.image_file, 0);
+ if (free_path) {
+ free(free_path);
+ free_path = NULL;
+ }
+ }
+ if (jl_options.outputo)
+ jl_options.outputo = absrealpath(jl_options.outputo, 0);
+ if (jl_options.outputji)
+ jl_options.outputji = absrealpath(jl_options.outputji, 0);
+ if (jl_options.outputbc)
+ jl_options.outputbc = absrealpath(jl_options.outputbc, 0);
+ if (jl_options.outputasm)
+ jl_options.outputasm = absrealpath(jl_options.outputasm, 0);
+ if (jl_options.machine_file)
+ jl_options.machine_file = absrealpath(jl_options.machine_file, 0);
+ if (jl_options.output_code_coverage)
+ jl_options.output_code_coverage = absformat(jl_options.output_code_coverage);
+ if (jl_options.tracked_path)
+ jl_options.tracked_path = absrealpath(jl_options.tracked_path, 0);
+
+ const char **cmdp = jl_options.cmds;
+ if (cmdp) {
+ for (; *cmdp; cmdp++) {
+ const char *cmd = *cmdp;
+ if (cmd[0] == 'L') {
+ *cmdp = absrealpath(cmd, 1);
+ }
+ }
+ }
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/src/jlfrontend.scm b/src/jlfrontend.scm
index 9c69da199c0cd..c313a1e9b0db5 100644
--- a/src/jlfrontend.scm
+++ b/src/jlfrontend.scm
@@ -198,6 +198,12 @@
(error-wrap (lambda ()
(julia-expand-macroscope expr))))
+(define (jl-default-inner-ctor-body field-kinds file line)
+ (expand-to-thunk- (default-inner-ctor-body (cdr field-kinds) file line) file line))
+
+(define (jl-default-outer-ctor-body args file line)
+ (expand-to-thunk- (default-outer-ctor-body (cadr args) (caddr args) (cadddr args) file line) file line))
+
; run whole frontend on a string. useful for testing.
(define (fe str)
(expand-toplevel-expr (julia-parse str) 'none 0))
diff --git a/src/jloptions.c b/src/jloptions.c
index ac515bea19845..ff5757618f8b2 100644
--- a/src/jloptions.c
+++ b/src/jloptions.c
@@ -104,6 +104,7 @@ JL_DLLEXPORT void jl_init_options(void)
0, // nprocs
NULL, // machine_file
NULL, // project
+ NULL, // program_file
0, // isinteractive
0, // color
JL_OPTIONS_HISTORYFILE_ON, // history file
@@ -154,6 +155,7 @@ JL_DLLEXPORT void jl_init_options(void)
JL_TRIM_NO, // trim
0, // task_metrics
-1, // timeout_for_safepoint_straggler_s
+ NULL, // safe_crash_log_file
};
jl_options_initialized = 1;
}
@@ -169,11 +171,12 @@ static const char opts[] =
" --help-hidden Print uncommon options not shown by `-h`\n\n"
// startup options
- " --project[={|@temp|@.}] Set as the active project/environment.\n"
+ " --project[={|@temp|@.|@script[]}] Set as the active project/environment.\n"
" Or, create a temporary environment with `@temp`\n"
" The default @. option will search through parent\n"
" directories until a Project.toml or JuliaProject.toml\n"
- " file is found.\n"
+ " file is found. @script is similar, but searches up from\n"
+ " the programfile or a path relative to programfile.\n"
" -J, --sysimage Start up with the given system image file\n"
" -H, --home Set location of `julia` executable\n"
" --startup-file={yes*|no} Load `JULIA_DEPOT_PATH/config/startup.jl`; \n"
@@ -382,6 +385,7 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp)
opt_permalloc_pkgimg,
opt_trim,
opt_experimental_features,
+ opt_safe_crash_log_file,
};
static const char* const shortopts = "+vhqH:e:E:L:J:C:it:p:O:g:m:";
static const struct option longopts[] = {
@@ -450,6 +454,7 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp)
{ "permalloc-pkgimg",required_argument, 0, opt_permalloc_pkgimg },
{ "heap-size-hint", required_argument, 0, opt_heap_size_hint },
{ "trim", optional_argument, 0, opt_trim },
+ { "safe-crash-log-file", required_argument, 0, opt_safe_crash_log_file },
{ 0, 0, 0, 0 }
};
@@ -1006,12 +1011,18 @@
jl_options.task_metrics = JL_OPTIONS_TASK_METRICS_ON;
else
jl_errorf("julia: invalid argument to --task-metrics={yes|no} (%s)", optarg);
+ break;
+ case opt_safe_crash_log_file:
+ jl_options.safe_crash_log_file = strdup(optarg);
+ if (jl_options.safe_crash_log_file == NULL)
+ jl_error("julia: failed to allocate memory for --safe-crash-log-file");
break;
default:
jl_errorf("julia: unhandled option -- %c\n"
"This is a bug, please report it.", c);
}
}
+ jl_options.program_file = optind < argc ? strdup(argv[optind]) : "";
parsing_args_done:
if (!jl_options.use_experimental_features) {
if (jl_options.trim != JL_TRIM_NO)
diff --git a/src/jloptions.h b/src/jloptions.h
index a8cc4a9a9e33d..f5c8f72a2cb6c 100644
--- a/src/jloptions.h
+++ b/src/jloptions.h
@@ -21,6 +21,7 @@ typedef struct {
int32_t nprocs;
const char *machine_file;
const char *project;
+ const char *program_file;
int8_t isinteractive;
int8_t color;
int8_t historyfile;
@@ -67,6 +68,7 @@ typedef struct {
int8_t trim;
int8_t task_metrics;
int16_t timeout_for_safepoint_straggler_s;
+ const char *safe_crash_log_file;
} jl_options_t;
#endif
diff --git a/src/jltypes.c b/src/jltypes.c
index b94922ce9cf54..52e707b618211 100644
--- a/src/jltypes.c
+++ b/src/jltypes.c
@@ -3007,26 +3007,27 @@ void jl_init_types(void) JL_GC_DISABLED
jl_typename_type->name->mt = jl_nonfunction_mt;
jl_typename_type->super = jl_any_type;
jl_typename_type->parameters = jl_emptysvec;
- jl_typename_type->name->n_uninitialized = 16 - 2;
- jl_typename_type->name->names = jl_perm_symsvec(16, "name", "module",
+ jl_typename_type->name->n_uninitialized = 17 - 2;
+ jl_typename_type->name->names = jl_perm_symsvec(17, "name", "module",
"names", "atomicfields", "constfields",
"wrapper", "Typeofwrapper", "cache", "linearcache",
"mt", "partial",
"hash", "n_uninitialized",
"flags", // "abstract", "mutable", "mayinlinealloc",
- "max_methods", "constprop_heuristic");
- const static uint32_t typename_constfields[1] = { 0x00003a27 }; // (1<<0)|(1<<1)|(1<<2)|(1<<5)|(1<<9)|(1<<11)|(1<<12)|(1<<13) ; TODO: put back (1<<3)|(1<<4) in this list
- const static uint32_t typename_atomicfields[1] = { 0x00000180 }; // (1<<7)|(1<<8)
+ "cache_entry_count", "max_methods", "constprop_heuristic");
+ const static uint32_t typename_constfields[1] = { 0b00011101000100111 }; // TODO: put back atomicfields and constfields in this list
+ const static uint32_t typename_atomicfields[1] = { 0b00100000110000000 };
jl_typename_type->name->constfields = typename_constfields;
jl_typename_type->name->atomicfields = typename_atomicfields;
jl_precompute_memoized_dt(jl_typename_type, 1);
- jl_typename_type->types = jl_svec(16, jl_symbol_type, jl_any_type /*jl_module_type*/,
+ jl_typename_type->types = jl_svec(17, jl_symbol_type, jl_any_type /*jl_module_type*/,
jl_simplevector_type, jl_any_type/*jl_voidpointer_type*/, jl_any_type/*jl_voidpointer_type*/,
jl_type_type, jl_type_type, jl_simplevector_type, jl_simplevector_type,
jl_methtable_type, jl_any_type,
jl_any_type /*jl_long_type*/, jl_any_type /*jl_int32_type*/,
jl_any_type /*jl_uint8_type*/,
jl_any_type /*jl_uint8_type*/,
+ jl_any_type /*jl_uint8_type*/,
jl_any_type /*jl_uint8_type*/);
jl_methtable_type->name = jl_new_typename_in(jl_symbol("MethodTable"), core, 0, 1);
@@ -3262,12 +3263,14 @@ void jl_init_types(void) JL_GC_DISABLED
jl_binding_partition_type =
jl_new_datatype(jl_symbol("BindingPartition"), core, jl_any_type, jl_emptysvec,
- jl_perm_symsvec(5, "restriction", "min_world", "max_world", "next", "reserved"),
- jl_svec(5, jl_uint64_type /* Special GC-supported union of Any and flags*/,
+ jl_perm_symsvec(5, "restriction", "min_world", "max_world", "next", "kind"),
+ jl_svec(5, jl_any_type,
jl_ulong_type, jl_ulong_type, jl_any_type/*jl_binding_partition_type*/, jl_ulong_type),
jl_emptysvec, 0, 1, 0);
- const static uint32_t binding_partition_atomicfields[] = { 0b01101 }; // Set fields 1, 3, 4 as atomic
+ const static uint32_t binding_partition_atomicfields[] = { 0b01110 }; // Set fields 2, 3, 4 as atomic
jl_binding_partition_type->name->atomicfields = binding_partition_atomicfields;
+ const static uint32_t binding_partition_constfields[] = { 0b10001 }; // Set fields 1, 5 as constant
+ jl_binding_partition_type->name->constfields = binding_partition_constfields;
jl_binding_type =
jl_new_datatype(jl_symbol("Binding"), core, jl_any_type, jl_emptysvec,
@@ -3275,7 +3278,7 @@ void jl_init_types(void) JL_GC_DISABLED
jl_svec(5, jl_any_type/*jl_globalref_type*/, jl_any_type, jl_binding_partition_type,
jl_any_type, jl_uint8_type),
jl_emptysvec, 0, 1, 0);
- const static uint32_t binding_atomicfields[] = { 0x0005 }; // Set fields 2, 3 as atomic
+ const static uint32_t binding_atomicfields[] = { 0x0016 }; // Set fields 2, 3, 5 as atomic
jl_binding_type->name->atomicfields = binding_atomicfields;
const static uint32_t binding_constfields[] = { 0x0001 }; // Set fields 1 as constant
jl_binding_type->name->constfields = binding_constfields;
@@ -3287,10 +3290,9 @@ void jl_init_types(void) JL_GC_DISABLED
jl_emptysvec, 0, 0, 3);
core = jl_new_module(jl_symbol("Core"), NULL);
- core->parent = core;
jl_type_typename->mt->module = core;
jl_core_module = core;
- core = NULL; // not ready yet to use
+ core = NULL; // not actually ready yet to use
tv = jl_svec1(tvar("Backend"));
jl_addrspace_typename =
@@ -3381,9 +3383,8 @@ void jl_init_types(void) JL_GC_DISABLED
core = jl_core_module;
jl_atomic_store_relaxed(&core->bindingkeyset, (jl_genericmemory_t*)jl_an_empty_memory_any);
// export own name, so "using Foo" makes "Foo" itself visible
- jl_set_const(core, core->name, (jl_value_t*)core);
- jl_module_public(core, core->name, 1);
- jl_set_const(core, jl_symbol("CPU"), (jl_value_t*)cpumem);
+ jl_set_initial_const(core, core->name, (jl_value_t*)core, 1);
+ jl_set_initial_const(core, jl_symbol("CPU"), (jl_value_t*)cpumem, 0);
core = NULL;
jl_expr_type =
@@ -3485,7 +3486,7 @@ void jl_init_types(void) JL_GC_DISABLED
jl_code_info_type =
jl_new_datatype(jl_symbol("CodeInfo"), core,
jl_any_type, jl_emptysvec,
- jl_perm_symsvec(22,
+ jl_perm_symsvec(23,
"code",
"debuginfo",
"ssavaluetypes",
@@ -3502,13 +3503,14 @@ void jl_init_types(void) JL_GC_DISABLED
"nargs",
"propagate_inbounds",
"has_fcall",
+ "has_image_globalref",
"nospecializeinfer",
"isva",
"inlining",
"constprop",
"purity",
"inlining_cost"),
- jl_svec(22,
+ jl_svec(23,
jl_array_any_type,
jl_debuginfo_type,
jl_any_type,
@@ -3527,6 +3529,7 @@ void jl_init_types(void) JL_GC_DISABLED
jl_bool_type,
jl_bool_type,
jl_bool_type,
+ jl_bool_type,
jl_uint8_type,
jl_uint8_type,
jl_uint16_type,
@@ -3537,13 +3540,13 @@ void jl_init_types(void) JL_GC_DISABLED
jl_method_type =
jl_new_datatype(jl_symbol("Method"), core,
jl_any_type, jl_emptysvec,
- jl_perm_symsvec(31,
+ jl_perm_symsvec(32,
"name",
"module",
"file",
"line",
+ "dispatch_status", // atomic
"primary_world", // atomic
- "deleted_world", // atomic
"sig",
"specializations", // !const
"speckeyset", // !const
@@ -3566,15 +3569,16 @@ void jl_init_types(void) JL_GC_DISABLED
"isva",
"is_for_opaque_closure",
"nospecializeinfer",
+ "did_scan_source",
"constprop",
"max_varargs",
"purity"),
- jl_svec(31,
+ jl_svec(32,
jl_symbol_type,
jl_module_type,
jl_symbol_type,
jl_int32_type,
- jl_ulong_type,
+ jl_int32_type,
jl_ulong_type,
jl_type_type,
jl_any_type, // union(jl_simplevector_type, jl_method_instance_type),
@@ -3600,6 +3604,7 @@ void jl_init_types(void) JL_GC_DISABLED
jl_bool_type,
jl_uint8_type,
jl_uint8_type,
+ jl_uint8_type,
jl_uint16_type),
jl_emptysvec,
0, 1, 10);
@@ -3639,7 +3644,7 @@ void jl_init_types(void) JL_GC_DISABLED
jl_code_instance_type =
jl_new_datatype(jl_symbol("CodeInstance"), core,
jl_any_type, jl_emptysvec,
- jl_perm_symsvec(17,
+ jl_perm_symsvec(21,
"def",
"owner",
"next",
@@ -3651,12 +3656,16 @@ void jl_init_types(void) JL_GC_DISABLED
"inferred",
"debuginfo",
"edges",
- //"absolute_max",
- "ipo_purity_bits",
"analysis_results",
+ "ipo_purity_bits",
+ "time_infer_total",
+ "time_infer_cache_saved",
+ "time_infer_self",
+ "time_compile",
+ //"absolute_max",
"specsigflags", "precompile",
"invoke", "specptr"), // function object decls
- jl_svec(17,
+ jl_svec(21,
jl_any_type,
jl_any_type,
jl_any_type,
@@ -3668,17 +3677,21 @@ void jl_init_types(void) JL_GC_DISABLED
jl_any_type,
jl_debuginfo_type,
jl_simplevector_type,
- //jl_bool_type,
- jl_uint32_type,
jl_any_type,
- jl_bool_type,
+ jl_uint32_type,
+ jl_uint16_type,
+ jl_uint16_type,
+ jl_uint16_type,
+ jl_uint16_type,
+ //jl_bool_type,
+ jl_uint8_type,
jl_bool_type,
jl_any_type, jl_any_type), // fptrs
jl_emptysvec,
0, 1, 1);
jl_svecset(jl_code_instance_type->types, 2, jl_code_instance_type);
- const static uint32_t code_instance_constfields[1] = { 0b00001000011100011 }; // Set fields 1, 2, 6-8, 13 as const
- const static uint32_t code_instance_atomicfields[1] = { 0b11110111100011100 }; // Set fields 3-5, 9-12, 14-17 as atomic
+ const static uint32_t code_instance_constfields[1] = { 0b000001110100011100011 }; // Set fields 1, 2, 6-8, 12, 14-16 as const
+ const static uint32_t code_instance_atomicfields[1] = { 0b111110001011100011100 }; // Set fields 3-5, 9-11, 13, 17-21 as atomic
// Fields 4-5 are only operated on by construction and deserialization, so are effectively const at runtime
// Fields ipo_purity_bits and analysis_results are not currently threadsafe or reliable, as they get mutated after optimization, but are not declared atomic
// and there is no way to tell (during inference) if their value is finalized yet (to wait for them to be narrowed if applicable)
@@ -3844,6 +3857,7 @@ void jl_init_types(void) JL_GC_DISABLED
jl_svecset(jl_typename_type->types, 13, jl_uint8_type);
jl_svecset(jl_typename_type->types, 14, jl_uint8_type);
jl_svecset(jl_typename_type->types, 15, jl_uint8_type);
+ jl_svecset(jl_typename_type->types, 16, jl_uint8_type);
jl_svecset(jl_methtable_type->types, 4, jl_long_type);
jl_svecset(jl_methtable_type->types, 5, jl_module_type);
jl_svecset(jl_methtable_type->types, 6, jl_array_any_type);
@@ -3854,8 +3868,8 @@ void jl_init_types(void) JL_GC_DISABLED
jl_svecset(jl_method_type->types, 13, jl_method_instance_type);
//jl_svecset(jl_debuginfo_type->types, 0, jl_method_instance_type); // union(jl_method_instance_type, jl_method_type, jl_symbol_type)
jl_svecset(jl_method_instance_type->types, 4, jl_code_instance_type);
- jl_svecset(jl_code_instance_type->types, 15, jl_voidpointer_type);
- jl_svecset(jl_code_instance_type->types, 16, jl_voidpointer_type);
+ jl_svecset(jl_code_instance_type->types, 19, jl_voidpointer_type);
+ jl_svecset(jl_code_instance_type->types, 20, jl_voidpointer_type);
jl_svecset(jl_binding_type->types, 0, jl_globalref_type);
jl_svecset(jl_binding_type->types, 3, jl_array_any_type);
jl_svecset(jl_binding_partition_type->types, 3, jl_binding_partition_type);
@@ -3875,8 +3889,11 @@ void jl_init_types(void) JL_GC_DISABLED
// override ismutationfree for builtin types that are mutable for identity
jl_string_type->ismutationfree = jl_string_type->isidentityfree = 1;
jl_symbol_type->ismutationfree = jl_symbol_type->isidentityfree = 1;
- jl_simplevector_type->ismutationfree = jl_simplevector_type->isidentityfree = 1;
+ jl_simplevector_type->isidentityfree = 1;
+ jl_typename_type->ismutationfree = 1;
jl_datatype_type->ismutationfree = 1;
+ jl_uniontype_type->ismutationfree = 1;
+ jl_unionall_type->ismutationfree = 1;
assert(((jl_datatype_t*)jl_array_any_type)->ismutationfree == 0);
assert(((jl_datatype_t*)jl_array_uint8_type)->ismutationfree == 0);
@@ -3927,27 +3944,30 @@ void post_boot_hooks(void)
jl_int32_type->super = jl_signed_type;
jl_int64_type->super = jl_signed_type;
- jl_errorexception_type = (jl_datatype_t*)core("ErrorException");
- jl_stackovf_exception = jl_new_struct_uninit((jl_datatype_t*)core("StackOverflowError"));
- jl_diverror_exception = jl_new_struct_uninit((jl_datatype_t*)core("DivideError"));
- jl_undefref_exception = jl_new_struct_uninit((jl_datatype_t*)core("UndefRefError"));
- jl_undefvarerror_type = (jl_datatype_t*)core("UndefVarError");
- jl_fielderror_type = (jl_datatype_t*)core("FieldError");
- jl_atomicerror_type = (jl_datatype_t*)core("ConcurrencyViolationError");
- jl_interrupt_exception = jl_new_struct_uninit((jl_datatype_t*)core("InterruptException"));
- jl_boundserror_type = (jl_datatype_t*)core("BoundsError");
- jl_memory_exception = jl_new_struct_uninit((jl_datatype_t*)core("OutOfMemoryError"));
+ jl_stackovf_exception = jl_new_struct_uninit((jl_datatype_t*)core("StackOverflowError"));
+ jl_diverror_exception = jl_new_struct_uninit((jl_datatype_t*)core("DivideError"));
+ jl_undefref_exception = jl_new_struct_uninit((jl_datatype_t*)core("UndefRefError"));
+ jl_interrupt_exception = jl_new_struct_uninit((jl_datatype_t*)core("InterruptException"));
+ jl_memory_exception = jl_new_struct_uninit((jl_datatype_t*)core("OutOfMemoryError"));
jl_readonlymemory_exception = jl_new_struct_uninit((jl_datatype_t*)core("ReadOnlyMemoryError"));
- jl_typeerror_type = (jl_datatype_t*)core("TypeError");
- jl_argumenterror_type = (jl_datatype_t*)core("ArgumentError");
- jl_methoderror_type = (jl_datatype_t*)core("MethodError");
- jl_loaderror_type = (jl_datatype_t*)core("LoadError");
- jl_initerror_type = (jl_datatype_t*)core("InitError");
+ jl_precompilable_error = jl_new_struct_uninit((jl_datatype_t*)core("PrecompilableError"));
+
+ jl_errorexception_type = (jl_datatype_t*)core("ErrorException");
+ jl_undefvarerror_type = (jl_datatype_t*)core("UndefVarError");
+ jl_fielderror_type = (jl_datatype_t*)core("FieldError");
+ jl_atomicerror_type = (jl_datatype_t*)core("ConcurrencyViolationError");
+ jl_boundserror_type = (jl_datatype_t*)core("BoundsError");
+ jl_typeerror_type = (jl_datatype_t*)core("TypeError");
+ jl_argumenterror_type = (jl_datatype_t*)core("ArgumentError");
+ jl_methoderror_type = (jl_datatype_t*)core("MethodError");
+ jl_loaderror_type = (jl_datatype_t*)core("LoadError");
+ jl_initerror_type = (jl_datatype_t*)core("InitError");
jl_missingcodeerror_type = (jl_datatype_t*)core("MissingCodeError");
- jl_precompilable_error = jl_new_struct_uninit((jl_datatype_t*)core("PrecompilableError"));
- jl_pair_type = core("Pair");
- jl_kwcall_func = core("kwcall");
- jl_kwcall_mt = ((jl_datatype_t*)jl_typeof(jl_kwcall_func))->name->mt;
+ jl_trimfailure_type = (jl_datatype_t*)core("TrimFailure");
+
+ jl_pair_type = core("Pair");
+ jl_kwcall_func = core("kwcall");
+ jl_kwcall_mt = ((jl_datatype_t*)jl_typeof(jl_kwcall_func))->name->mt;
jl_atomic_store_relaxed(&jl_kwcall_mt->max_args, 0);
jl_weakref_type = (jl_datatype_t*)core("WeakRef");
diff --git a/src/julia-parser.scm b/src/julia-parser.scm
index 891a26bb0ea49..4415dc8686065 100644
--- a/src/julia-parser.scm
+++ b/src/julia-parser.scm
@@ -1329,13 +1329,13 @@
(define (valid-func-sig? paren sig)
(and (pair? sig)
- (or (eq? (car sig) 'call)
- (eq? (car sig) 'tuple)
+ (or (memq (car sig) '(call tuple))
+ (and (not paren) (eq? (car sig) 'macrocall))
(and paren (eq? (car sig) 'block))
(and paren (eq? (car sig) '...))
(and (eq? (car sig) '|::|)
(pair? (cadr sig))
- (eq? (car (cadr sig)) 'call))
+ (memq (car (cadr sig)) '(call macrocall)))
(and (eq? (car sig) 'where)
(valid-func-sig? paren (cadr sig))))))
diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm
index 97d76e7762a9e..3c003b04e4ce4 100644
--- a/src/julia-syntax.scm
+++ b/src/julia-syntax.scm
@@ -183,6 +183,7 @@
(meta ret-type ,R)
,@(list-tail body (+ 1 (length meta))))))))))
+
;; convert x<:T<:y etc. exprs into (name lower-bound upper-bound)
;; a bound is #f if not specified
(define (analyze-typevar e)
@@ -193,10 +194,14 @@
(cond ((atom? e) (list (check-sym e) #f #f))
((eq? (car e) 'var-bounds) (cdr e))
((and (eq? (car e) 'comparison) (length= e 6))
- (cons (check-sym (cadddr e))
- (cond ((and (eq? (caddr e) '|<:|) (eq? (caddr (cddr e)) '|<:|))
- (list (cadr e) (last e)))
- (else (error "invalid bounds in \"where\"")))))
+ (let* ((lhs (list-ref e 1))
+ (rel (list-ref e 2))
+ (t (check-sym (list-ref e 3)))
+ (rel-same (eq? rel (list-ref e 4)))
+ (rhs (list-ref e 5)))
+ (cond ((and rel-same (eq? rel '|<:|)) (list t lhs rhs))
+ ((and rel-same (eq? rel '|>:|)) (list t rhs lhs))
+ (else (error "invalid bounds in \"where\"")))))
((eq? (car e) '|<:|)
(list (check-sym (cadr e)) #f (caddr e)))
((eq? (car e) '|>:|)
@@ -753,64 +758,34 @@
(params bounds) (sparam-name-bounds params)
(struct-def-expr- name params bounds super (flatten-blocks fields) mut)))
-;; replace field names with gensyms if they conflict with field-types
-(define (safe-field-names field-names field-types)
- (if (any (lambda (v) (contains (lambda (e) (eq? e v)) field-types))
- field-names)
- (map (lambda (x) (gensy)) field-names)
- ;; use a different name for a field called `_`
- (map (lambda (x) (if (eq? x '_) (gensy) x)) field-names)))
-
-(define (with-wheres call wheres)
- (if (pair? wheres)
- `(where ,call ,@wheres)
- call))
-
-(define (default-inner-ctors name field-names field-types params bounds locs)
- (let* ((field-names (safe-field-names field-names field-types))
- (all-ctor (if (null? params)
- ;; definition with exact types for all arguments
- `(function (call ,name
- ,@(map make-decl field-names field-types))
- (block
- ,@locs
- (new (globalref (thismodule) ,name) ,@field-names)))
- #f))
- (any-ctor (if (or (not all-ctor) (any (lambda (t) (not (equal? t '(core Any))))
- field-types))
- ;; definition with Any for all arguments
- ;; only if any field type is not Any, checked at runtime
- `(function (call (|::| |#ctor-self#|
- ,(with-wheres
- `(curly (core Type) ,(if (pair? params)
- `(curly ,name ,@params)
- name))
- (map (lambda (b) (cons 'var-bounds b)) bounds)))
- ,@field-names)
- (block
- ,@locs
- (call new ,@field-names))) ; this will add convert calls later
- #f)))
- (if all-ctor
- (if any-ctor
- (list all-ctor
- `(if ,(foldl (lambda (t u)
- `(&& ,u (call (core ===) (core Any) ,t)))
- `(call (core ===) (core Any) ,(car field-types))
- (cdr field-types))
- '(block)
- ,any-ctor))
- (list all-ctor))
- (list any-ctor))))
-
-(define (default-outer-ctor name field-names field-types params bounds locs)
- (let ((field-names (safe-field-names field-names field-types)))
- `(function ,(with-wheres
- `(call ,name ,@(map make-decl field-names field-types))
- (map (lambda (b) (cons 'var-bounds b)) bounds))
- (block
- ,@locs
- (new (curly ,name ,@params) ,@field-names)))))
+;; definition with Any for all arguments (except type, which is exact)
+;; field-kinds:
+;; -1 no convert (e.g. because it is Any)
+;; 0 normal convert to fieldtype
+;; 1+ static_parameter N
+(define (default-inner-ctor-body field-kinds file line)
+ (let* ((name '|#ctor-self#|)
+ (field-names (map (lambda (idx) (symbol (string "_" (+ idx 1)))) (iota (length field-kinds))))
+ (field-convert (lambda (fld fty val)
+ (cond ((eq? fty -1) val)
+ ((> fty 0) (convert-for-type-decl val `(static_parameter ,fty) #f #f))
+ (else (convert-for-type-decl val `(call (core fieldtype) ,name ,(+ fld 1)) #f #f)))))
+ (field-vals (map field-convert (iota (length field-names)) field-kinds field-names))
+ (body `(block
+ (line ,line ,file)
+ (return (new ,name ,@field-vals)))))
+ `(lambda ,(cons name field-names) () (scope-block ,body))))
+
+;; definition with exact types for all arguments (except type, which is not parameterized)
+(define (default-outer-ctor-body thistype field-count sparam-count file line)
+ (let* ((name '|#ctor-self#|)
+ (field-names (map (lambda (idx) (symbol (string "_" (+ idx 1)))) (iota field-count)))
+ (sparams (map (lambda (idx) `(static_parameter ,(+ idx 1))) (iota sparam-count)))
+ (type (if (null? sparams) name `(curly ,thistype ,@sparams)))
+ (body `(block
+ (line ,line ,file)
+ (return (new ,type ,@field-names)))))
+ `(lambda ,(cons name field-names) () (scope-block ,body))))
(define (num-non-varargs args)
(count (lambda (a) (not (vararg? a))) args))
@@ -993,16 +968,15 @@
fields)))
(attrs (reverse attrs))
(defs (filter (lambda (x) (not (or (effect-free? x) (eq? (car x) 'string)))) defs))
- (locs (if (and (pair? fields0) (linenum? (car fields0)))
- (list (car fields0))
- '()))
+ (loc (if (and (pair? fields0) (linenum? (car fields0)))
+ (car fields0)
+ '(line 0 ||)))
(field-names (map decl-var fields))
(field-types (map decl-type fields))
- (defs2 (if (null? defs)
- (default-inner-ctors name field-names field-types params bounds locs)
- defs))
(min-initialized (min (ctors-min-initialized defs) (length fields)))
- (prev (make-ssavalue)))
+ (hasprev (make-ssavalue))
+ (prev (make-ssavalue))
+ (newdef (make-ssavalue)))
(let ((dups (has-dups field-names)))
(if dups (error (string "duplicate field name: \"" (car dups) "\" is not unique"))))
(for-each (lambda (v)
@@ -1023,52 +997,38 @@
(call (core svec) ,@attrs)
,mut ,min-initialized))
(call (core _setsuper!) ,name ,super)
- (if (call (core isdefinedglobal) (thismodule) (inert ,name) (false))
- (block
- (= ,prev (globalref (thismodule) ,name))
- (if (call (core _equiv_typedef) ,prev ,name)
- ;; if this is compatible with an old definition, use the existing type object
- ;; and its parameters
- (block (= ,name ,prev)
- ,@(if (pair? params)
- `((= (tuple ,@params) (|.|
- ,(foldl (lambda (_ x) `(|.| ,x (quote body)))
- prev
- params)
- (quote parameters))))
- '())))))
- (call (core _typebody!) ,name (call (core svec) ,@(insert-struct-shim field-types name)))
- (const (globalref (thismodule) ,name) ,name)
+ (= ,hasprev (&& (call (core isdefinedglobal) (thismodule) (inert ,name) (false)) (call (core _equiv_typedef) (globalref (thismodule) ,name) ,name)))
+ (= ,prev (if ,hasprev (globalref (thismodule) ,name) (false)))
+ (if ,hasprev
+ ;; if this is compatible with an old definition, use the old parameters, but the
+ ;; new object. This will fail to capture recursive cases, but the call to typebody!
+ ;; below is permitted to choose either type definition to put into the binding table
+ (block ,@(if (pair? params)
+ `((= (tuple ,@params) (|.|
+ ,(foldl (lambda (_ x) `(|.| ,x (quote body)))
+ prev
+ params)
+ (quote parameters))))
+ '())))
+ (= ,newdef (call (core _typebody!) ,prev ,name (call (core svec) ,@(insert-struct-shim field-types name))))
+ (const (globalref (thismodule) ,name) ,newdef)
(latestworld)
(null)))
- ;; "inner" constructors
- (scope-block
- (block
- (hardscope)
- (global ,name)
- ,@(map (lambda (c)
- (rewrite-ctor c name params field-names field-types))
- defs2)))
- ;; "outer" constructors
- ,@(if (and (null? defs)
- (not (null? params))
- ;; To generate an outer constructor, each parameter must occur in a field
- ;; type, or in the bounds of a subsequent parameter.
- ;; Otherwise the constructor would not work, since the parameter values
- ;; would never be specified.
- (let loop ((root-types field-types)
- (sp (reverse bounds)))
- (or (null? sp)
- (let ((p (car sp)))
- (and (expr-contains-eq (car p) (cons 'list root-types))
- (loop (append (cdr p) root-types)
- (cdr sp)))))))
- `((scope-block
- (block
- (global ,name)
- ,(default-outer-ctor name field-names field-types
- params bounds locs))))
- '())
+ ;; Always define ctors even if we didn't change the definition.
+ ;; If newdef===prev, then this is a bit suspect, since we don't know what might be
+ ;; changing about the old ctor definitions (we don't even track whether we're
+ ;; replacing defaultctors with identical ones). But it seems better to have the ctors
+ ;; added alongside (replacing) the old ones, than to not have them and need them.
+ ;; Commonly Revise.jl should be used to figure out actually which methods should
+ ;; actually be deleted or added anew.
+ ,(if (null? defs)
+ `(call (core _defaultctors) ,newdef (inert ,loc))
+ `(scope-block
+ (block
+ (hardscope)
+ (global ,name)
+ ,@(map (lambda (c) (rewrite-ctor c name params field-names field-types)) defs))))
+ (latestworld)
(null)))))
(define (abstract-type-def-expr name params super)
@@ -1084,7 +1044,7 @@
(toplevel-only abstract_type)
(= ,name (call (core _abstracttype) (thismodule) (inert ,name) (call (core svec) ,@params)))
(call (core _setsuper!) ,name ,super)
- (call (core _typebody!) ,name)
+ (call (core _typebody!) (false) ,name)
(if (&& (call (core isdefinedglobal) (thismodule) (inert ,name) (false))
(call (core _equiv_typedef) (globalref (thismodule) ,name) ,name))
(null)
@@ -1105,7 +1065,7 @@
(toplevel-only primitive_type)
(= ,name (call (core _primitivetype) (thismodule) (inert ,name) (call (core svec) ,@params) ,n))
(call (core _setsuper!) ,name ,super)
- (call (core _typebody!) ,name)
+ (call (core _typebody!) (false) ,name)
(if (&& (call (core isdefinedglobal) (thismodule) (inert ,name) (false))
(call (core _equiv_typedef) (globalref (thismodule) ,name) ,name))
(null)
@@ -1225,7 +1185,9 @@
(cond ((and (length= e 2) (or (symbol? name) (globalref? name)))
(if (not (valid-name? name))
(error (string "invalid function name \"" name "\"")))
- `(method ,name))
+ (if (globalref? name)
+ `(block (global ,name) (method ,name))
+ `(block (global-if-global ,name) (method ,name))))
((not (pair? name)) e)
((eq? (car name) 'call)
(let* ((raw-typevars (or where '()))
@@ -1467,7 +1429,7 @@
(else
(error "invalid \"try\" form")))))
-(define (expand-unionall-def name type-ex (allow-local #t))
+(define (expand-unionall-def name type-ex (const? #t))
(if (and (pair? name)
(eq? (car name) 'curly))
(let ((name (cadr name))
@@ -1478,7 +1440,8 @@
(expand-forms
`(block
(= ,rr (where ,type-ex ,@params))
- (,(if allow-local 'assign-const-if-global 'const) ,name ,rr)
+ (,(if const? 'const 'assign-const-if-global) ,name ,rr)
+ (latestworld-if-toplevel)
,rr)))
(expand-forms
`(const (= ,name ,type-ex)))))
@@ -1487,38 +1450,24 @@
(filter (lambda (x) (not (underscore-symbol? x))) syms))
;; Expand `[global] const a::T = val`
-(define (expand-const-decl e (mustassgn #f))
- (if (length= e 3) e
- (let ((arg (cadr e)))
- (if (atom? arg)
- (if mustassgn
- (error "expected assignment after \"const\"")
- e)
- (case (car arg)
- ((global)
- (expand-const-decl `(const ,(cadr arg)) #t))
- ((=)
- (cond
- ;; `const f() = ...` - The `const` here is inoperative, but the syntax happened to work in earlier versions, so simply strip `const`.
- ;; TODO: Consider whether to keep this in 2.0.
- ((eventually-call? (cadr arg))
- (expand-forms arg))
- ((and (pair? (cadr arg)) (eq? (caadr arg) 'curly))
- (expand-unionall-def (cadr arg) (caddr arg)))
- ((and (pair? (cadr arg)) (eq? (caadr arg) 'tuple) (not (has-parameters? (cdr (cadr arg)))))
- ;; We need this case because `(f(), g()) = (1, 2)` goes through here, which cannot go via the `local` lowering below,
- ;; because the symbols come out wrong. Sigh... So much effort for such a syntax corner case.
- (expand-tuple-destruct (cdr (cadr arg)) (caddr arg) (lambda (assgn) `(,(car e) ,assgn))))
- (else
- (let ((rr (make-ssavalue)))
- (expand-forms `(block
- (= ,rr ,(caddr arg))
- (scope-block (block (hardscope)
- (local (= ,(cadr arg) ,rr))
- ,.(map (lambda (v) `(,(car e) (globalref (thismodule) ,v) ,v)) (filter-not-underscore (lhs-vars (cadr arg))))
- (latestworld)
- ,rr))))))))
- (else (error "expected assignment after \"const\"")))))))
+(define (expand-const-decl e)
+ (define (check-assignment asgn)
+ (unless (and (pair? asgn) (eq? (car asgn) '=))
+ ;; (const (global x)) is possible due to a parser quirk
+ (error "expected assignment after \"const\"")))
+ (if (length= e 3)
+ `(const ,(cadr e) ,(expand-forms (caddr e)))
+ (let ((arg (cadr e)))
+ (case (car arg)
+ ((global) (let ((asgn (cadr arg)))
+ (check-assignment asgn)
+ `(block
+ ,.(map (lambda (v) `(global ,v))
+ (lhs-bound-names (cadr asgn)))
+ ,(expand-assignment asgn #t))))
+ ((=) (check-assignment arg)
+ (expand-assignment arg #t))
+ (else (error "expected assignment after \"const\""))))))
(define (expand-atomic-decl e)
(error "unimplemented or unsupported atomic declaration"))
@@ -1564,6 +1513,152 @@
(else
(error (string "invalid syntax in \"" what "\" declaration"))))))))
+(define (expand-assignment e (const? #f))
+ (define lhs (cadr e))
+ (define (function-lhs? lhs)
+ (and (pair? lhs)
+ (or (eq? (car lhs) 'call)
+ (eq? (car lhs) 'where)
+ (and (eq? (car lhs) '|::|)
+ (pair? (cadr lhs))
+ (eq? (car (cadr lhs)) 'call)))))
+ (define (assignment-to-function lhs e) ;; convert '= expr to 'function expr
+ (cons 'function (cdr e)))
+ (define (maybe-wrap-const x)
+ (if const? `(const ,x) x))
+ (cond
+ ((function-lhs? lhs)
+ ;; `const f() = ...` - The `const` here is inoperative, but the syntax
+ ;; happened to work in earlier versions, so simply strip `const`.
+ (expand-forms (assignment-to-function lhs e)))
+ ((and (pair? lhs)
+ (eq? (car lhs) 'curly))
+ (expand-unionall-def (cadr e) (caddr e) const?))
+ ((assignment? (caddr e))
+ ;; chain of assignments - convert a=b=c to `b=c; a=c`
+ (let loop ((lhss (list lhs))
+ (rhs (caddr e)))
+ (if (and (assignment? rhs) (not (function-lhs? (cadr rhs))))
+ (loop (cons (cadr rhs) lhss) (caddr rhs))
+ (let* ((rr (if (symbol-like? rhs) rhs (make-ssavalue)))
+ (lhss (reverse lhss))
+ (lhs0 (car lhss))
+ (lhss (cdr lhss))
+ (lhss (reverse lhss)))
+ (expand-forms
+ `(block ,.(if (eq? rr rhs) '() `((= ,rr ,(if (assignment? rhs)
+ (assignment-to-function (cadr rhs) rhs)
+ rhs))))
+ ,@(map (lambda (l) `(= ,l ,rr)) lhss)
+ ;; In const x = y = z, only x becomes const
+ ,(maybe-wrap-const `(= ,lhs0 ,rr))
+ (unnecessary ,rr)))))))
+ ((or (and (symbol-like? lhs) (valid-name? lhs))
+ (globalref? lhs))
+ ;; TODO: We currently call (latestworld) after every (const _ _), but this
+ ;; may need to be moved elsewhere if we want to avoid making one const
+ ;; visible before side effects have been performed (#57484)
+ (if const?
+ (let ((rr (make-ssavalue)))
+ `(block
+ ,(sink-assignment rr (expand-forms (caddr e)))
+ (const ,lhs ,rr)
+ (latestworld)
+ (unnecessary ,rr)))
+ (sink-assignment lhs (expand-forms (caddr e)))))
+ ((atom? lhs)
+ (error (string "invalid assignment location \"" (deparse lhs) "\"")))
+ (else
+ (case (car lhs)
+ ((|.|)
+ ;; a.b =
+ (when const?
+ (error (string "cannot declare \"" (deparse lhs) "\" `const`")))
+ (let* ((a (cadr lhs))
+ (b (caddr lhs))
+ (rhs (caddr e)))
+ (if (and (length= b 2) (eq? (car b) 'tuple))
+ (error (string "invalid syntax \""
+ (string (deparse a) ".(" (deparse (cadr b)) ") = ...") "\"")))
+ (let ((aa (if (symbol-like? a) a (make-ssavalue)))
+ (bb (if (or (atom? b) (symbol-like? b) (and (pair? b) (quoted? b)))
+ b (make-ssavalue)))
+ (rr (if (or (symbol-like? rhs) (atom? rhs)) rhs (make-ssavalue))))
+ `(block
+ ,.(if (eq? aa a) '() (list (sink-assignment aa (expand-forms a))))
+ ,.(if (eq? bb b) '() (list (sink-assignment bb (expand-forms b))))
+ ,.(if (eq? rr rhs) '() (list (sink-assignment rr (expand-forms rhs))))
+ (call (top setproperty!) ,aa ,bb ,rr)
+ (unnecessary ,rr)))))
+ ((tuple)
+ (let ((lhss (cdr lhs))
+ (x (caddr e)))
+ (if (has-parameters? lhss)
+ ;; property destructuring
+ (expand-property-destruct lhss x maybe-wrap-const)
+ ;; multiple assignment
+ (expand-tuple-destruct lhss x maybe-wrap-const))))
+ ((typed_hcat)
+ (error "invalid spacing in left side of indexed assignment"))
+ ((typed_vcat typed_ncat)
+ (error "unexpected \";\" in left side of indexed assignment"))
+ ((ref)
+ ;; (= (ref a . idxs) rhs)
+ (when const?
+ (error (string "cannot declare \"" (deparse lhs) "\" `const`")))
+ (let ((a (cadr lhs))
+ (idxs (cddr lhs))
+ (rhs (caddr e)))
+ (let* ((reuse (and (pair? a)
+ (contains (lambda (x) (eq? x 'end))
+ idxs)))
+ (arr (if reuse (make-ssavalue) a))
+ (stmts (if reuse `((= ,arr ,(expand-forms a))) '()))
+ (rrhs (and (pair? rhs) (not (ssavalue? rhs)) (not (quoted? rhs))))
+ (r (if rrhs (make-ssavalue) rhs))
+ (rini (if rrhs (list (sink-assignment r (expand-forms rhs))) '())))
+ (receive
+ (new-idxs stuff) (process-indices arr idxs)
+ `(block
+ ,@stmts
+ ,.(map expand-forms stuff)
+ ,@rini
+ ,(expand-forms
+ `(call (top setindex!) ,arr ,r ,@new-idxs))
+ (unnecessary ,r))))))
+ ((|::|)
+ ;; (= (|::| T) rhs) is an error
+ (if (null? (cddr lhs))
+ (error (string "invalid assignment location \"" (deparse lhs) "\"")))
+ ;; (= (|::| x T) rhs)
+ (let ((x (cadr lhs))
+ (T (caddr lhs))
+ (rhs (caddr e)))
+ (let ((e (remove-argument-side-effects x)))
+ (if const?
+ ;; This could go through convert-assignment in the closure
+ ;; conversion pass, but since constants don't have declared types
+ ;; the way other variables do, we insert convert() here.
+ (expand-forms
+ ;; TODO: This behaviour (`const _:T = ...` does not call convert,
+ ;; but still evaluates RHS) should be documented.
+ `(const ,(car e) ,(if (underscore-symbol? (car e))
+ rhs
+ (convert-for-type-decl rhs T #t #f))))
+ (expand-forms
+ `(block ,@(cdr e)
+ ;; TODO: When x is a complex expression, this acts as a
+ ;; typeassert rather than a declaration.
+ ,.(if (underscore-symbol? (car e))
+ '() ; Assignment to _ will ultimately be discarded---don't declare anything
+ `((decl ,(car e) ,T)))
+ ,(maybe-wrap-const `(= ,(car e) ,rhs))))))))
+ ((vcat ncat)
+ ;; (= (vcat . args) rhs)
+ (error "use \"(a, b) = ...\" to assign multiple values"))
+ (else
+ (error (string "invalid assignment location \"" (deparse lhs) "\"")))))))
+
;; convert (lhss...) = (tuple ...) to assignments, eliminating the tuple
(define (tuple-to-assignments lhss0 x wrap)
(let loop ((lhss lhss0)
@@ -2299,7 +2394,7 @@
(gensy))
(else (make-ssavalue))))
-(define (expand-property-destruct lhs x)
+(define (expand-property-destruct lhs x (wrap identity))
(if (not (length= lhs 1))
(error (string "invalid assignment location \"" (deparse `(tuple ,lhs)) "\"")))
(let* ((lhss (cdar lhs))
@@ -2314,7 +2409,7 @@
(cadr field))
(else
(error (string "invalid assignment location \"" (deparse `(tuple ,lhs)) "\""))))))
- (expand-forms `(= ,field (call (top getproperty) ,xx (quote ,prop))))))
+ (expand-forms (wrap `(= ,field (call (top getproperty) ,xx (quote ,prop)))))))
lhss)
(unnecessary ,xx))))
@@ -2335,7 +2430,6 @@
(if (null? lhss)
'()
(let* ((lhs (car lhss))
- (wrapfirst (lambda (x i) (if (= i 1) (wrap x) x)))
(lhs- (cond ((or (symbol? lhs) (ssavalue? lhs))
lhs)
((vararg? lhs)
@@ -2347,7 +2441,10 @@
(make-ssavalue))))))
;; can't use ssavalues if it's a function definition
((eventually-call? lhs) (gensy))
- (else (make-ssavalue)))))
+ (else (make-ssavalue))))
+ ;; If we use an intermediary lhs, don't wrap `const`.
+ (wrap-subassign (if (eq? lhs lhs-) wrap identity))
+ (wrapfirst (lambda (x i) (if (= i 1) (wrap-subassign x) x))))
(if (and (vararg? lhs) (any vararg? (cdr lhss)))
(error "multiple \"...\" on lhs of assignment"))
(if (not (eq? lhs lhs-))
@@ -2359,7 +2456,7 @@
(if (underscore-symbol? (cadr lhs-))
'()
(list (expand-forms
- (wrap `(= ,(cadr lhs-) (call (top rest) ,xx ,@(if (eq? i 1) '() `(,st))))))))
+ (wrap-subassign `(= ,(cadr lhs-) (call (top rest) ,xx ,@(if (eq? i 1) '() `(,st))))))))
(let ((tail (if (eventually-call? lhs) (gensy) (make-ssavalue))))
(cons (expand-forms
(lower-tuple-assignment
@@ -2486,11 +2583,13 @@
(typ-svec (caddr sig-svec))
(tvars (cddr (cadddr sig-svec)))
(argtypes (cdddr typ-svec))
- (functionloc (cadr (caddddr sig-svec))))
- (let* ((argtype (foldl (lambda (var ex) `(call (core UnionAll) ,var ,ex))
- (expand-forms `(curly (core Tuple) ,@argtypes))
- (reverse tvars))))
- `(_opaque_closure ,(or argt argtype) ,rt_lb ,rt_ub ,isva ,(length argtypes) ,allow-partial ,functionloc ,lam))))
+ (functionloc (cadr (caddddr sig-svec)))
+ (argtype (foldl (lambda (var ex) `(call (core UnionAll) ,var ,ex))
+ (expand-forms `(curly (core Tuple) ,@argtypes))
+ (reverse tvars)))
+ (argtype (or argt argtype))
+ (argtype (if (null? stmts) argtype `(block ,@stmts ,argtype))))
+ `(_opaque_closure ,argtype ,rt_lb ,rt_ub ,isva ,(length argtypes) ,allow-partial ,functionloc ,lam)))
'block
(lambda (e)
@@ -2535,115 +2634,7 @@
'global expand-local-or-global-decl
'local-def expand-local-or-global-decl
- '=
- (lambda (e)
- (define lhs (cadr e))
- (define (function-lhs? lhs)
- (and (pair? lhs)
- (or (eq? (car lhs) 'call)
- (eq? (car lhs) 'where)
- (and (eq? (car lhs) '|::|)
- (pair? (cadr lhs))
- (eq? (car (cadr lhs)) 'call)))))
- (define (assignment-to-function lhs e) ;; convert '= expr to 'function expr
- (cons 'function (cdr e)))
- (cond
- ((function-lhs? lhs)
- (expand-forms (assignment-to-function lhs e)))
- ((and (pair? lhs)
- (eq? (car lhs) 'curly))
- (expand-unionall-def (cadr e) (caddr e)))
- ((assignment? (caddr e))
- ;; chain of assignments - convert a=b=c to `b=c; a=c`
- (let loop ((lhss (list lhs))
- (rhs (caddr e)))
- (if (and (assignment? rhs) (not (function-lhs? (cadr rhs))))
- (loop (cons (cadr rhs) lhss) (caddr rhs))
- (let ((rr (if (symbol-like? rhs) rhs (make-ssavalue))))
- (expand-forms
- `(block ,.(if (eq? rr rhs) '() `((= ,rr ,(if (assignment? rhs)
- (assignment-to-function (cadr rhs) rhs)
- rhs))))
- ,@(map (lambda (l) `(= ,l ,rr))
- lhss)
- (unnecessary ,rr)))))))
- ((or (and (symbol-like? lhs) (valid-name? lhs))
- (globalref? lhs))
- (sink-assignment lhs (expand-forms (caddr e))))
- ((atom? lhs)
- (error (string "invalid assignment location \"" (deparse lhs) "\"")))
- (else
- (case (car lhs)
- ((|.|)
- ;; a.b =
- (let* ((a (cadr lhs))
- (b (caddr lhs))
- (rhs (caddr e)))
- (if (and (length= b 2) (eq? (car b) 'tuple))
- (error (string "invalid syntax \""
- (string (deparse a) ".(" (deparse (cadr b)) ") = ...") "\"")))
- (let ((aa (if (symbol-like? a) a (make-ssavalue)))
- (bb (if (or (atom? b) (symbol-like? b) (and (pair? b) (quoted? b)))
- b (make-ssavalue)))
- (rr (if (or (symbol-like? rhs) (atom? rhs)) rhs (make-ssavalue))))
- `(block
- ,.(if (eq? aa a) '() (list (sink-assignment aa (expand-forms a))))
- ,.(if (eq? bb b) '() (list (sink-assignment bb (expand-forms b))))
- ,.(if (eq? rr rhs) '() (list (sink-assignment rr (expand-forms rhs))))
- (call (top setproperty!) ,aa ,bb ,rr)
- (unnecessary ,rr)))))
- ((tuple)
- (let ((lhss (cdr lhs))
- (x (caddr e)))
- (if (has-parameters? lhss)
- ;; property destructuring
- (expand-property-destruct lhss x)
- ;; multiple assignment
- (expand-tuple-destruct lhss x))))
- ((typed_hcat)
- (error "invalid spacing in left side of indexed assignment"))
- ((typed_vcat typed_ncat)
- (error "unexpected \";\" in left side of indexed assignment"))
- ((ref)
- ;; (= (ref a . idxs) rhs)
- (let ((a (cadr lhs))
- (idxs (cddr lhs))
- (rhs (caddr e)))
- (let* ((reuse (and (pair? a)
- (contains (lambda (x) (eq? x 'end))
- idxs)))
- (arr (if reuse (make-ssavalue) a))
- (stmts (if reuse `((= ,arr ,(expand-forms a))) '()))
- (rrhs (and (pair? rhs) (not (ssavalue? rhs)) (not (quoted? rhs))))
- (r (if rrhs (make-ssavalue) rhs))
- (rini (if rrhs (list (sink-assignment r (expand-forms rhs))) '())))
- (receive
- (new-idxs stuff) (process-indices arr idxs)
- `(block
- ,@stmts
- ,.(map expand-forms stuff)
- ,@rini
- ,(expand-forms
- `(call (top setindex!) ,arr ,r ,@new-idxs))
- (unnecessary ,r))))))
- ((|::|)
- ;; (= (|::| T) rhs) is an error
- (if (null? (cddr lhs))
- (error (string "invalid assignment location \"" (deparse lhs) "\"")))
- ;; (= (|::| x T) rhs)
- (let ((x (cadr lhs))
- (T (caddr lhs))
- (rhs (caddr e)))
- (let ((e (remove-argument-side-effects x)))
- (expand-forms
- `(block ,@(cdr e)
- (decl ,(car e) ,T)
- (= ,(car e) ,rhs))))))
- ((vcat ncat)
- ;; (= (vcat . args) rhs)
- (error "use \"(a, b) = ...\" to assign multiple values"))
- (else
- (error (string "invalid assignment location \"" (deparse lhs) "\"")))))))
+ '= expand-assignment
'abstract
(lambda (e)
@@ -3012,6 +3003,16 @@
(define (lhs-vars e)
(map decl-var (lhs-decls e)))
+;; Return all the names that will be bound by the assignment LHS, including
+;; curlies and calls.
+(define (lhs-bound-names e)
+ (cond ((underscore-symbol? e) '())
+ ((atom? e) (list e))
+ ((and (pair? e) (memq (car e) '(call curly where |::|)))
+ (lhs-bound-names (cadr e)))
+ ((and (pair? e) (memq (car e) '(tuple parameters)))
+ (apply append (map lhs-bound-names (cdr e))))))
+
(define (all-decl-vars e) ;; map decl-var over every level of an assignment LHS
(cond ((eventually-call? e) e)
((decl? e) (decl-var e))
@@ -3038,7 +3039,7 @@
;; like v = val, except that if `v` turns out global(either
;; implicitly or by explicit `global`), it gains an implicit `const`
(set! vars (cons (cadr e) vars)))
- ((=)
+ ((= const)
(let ((v (decl-var (cadr e))))
(find-assigned-vars- (caddr e))
(if (or (ssavalue? v) (globalref? v) (underscore-symbol? v))
@@ -3164,10 +3165,18 @@
((eq? (car e) 'global)
(check-valid-name (cadr e))
e)
+
((eq? (car e) 'assign-const-if-global)
(if (eq? (var-kind (cadr e) scope) 'local)
- (if (length= e 2) (null) `(= ,@(cdr e)))
- `(const ,@(cdr e))))
+ (if (length= e 2)
+ (null)
+ (resolve-scopes- `(= ,@(cdr e)) scope sp loc))
+ (resolve-scopes- `(const ,@(cdr e)) scope sp loc)))
+ ((eq? (car e) 'global-if-global)
+ (if (eq? (var-kind (cadr e) scope) 'local)
+ '(null)
+ `(global ,@(cdr e))))
+
((memq (car e) '(local local-def))
(check-valid-name (cadr e))
;; remove local decls
@@ -3320,7 +3329,7 @@
,(resolve-scopes- (caddr e) scope)
,(resolve-scopes- (cadddr e) scope (method-expr-static-parameters e))))
(else
- (if (and (eq? (car e) '=) (symbol? (cadr e))
+ (if (and (memq (car e) '(= const)) (symbol? (cadr e))
scope (null? (lam:args (scope:lam scope)))
(warn-var?! (cadr e) scope)
(= *scopewarn-opt* 1))
@@ -3440,7 +3449,7 @@
((local-def) ;; a local that we know has an assignment that dominates all usages
(let ((vi (get tab (cadr e) #f)))
(vinfo:set-never-undef! vi #t)))
- ((=)
+ ((= const)
(let ((vi (and (symbol? (cadr e)) (get tab (cadr e) #f))))
(if vi ; if local or captured
(begin (if (vinfo:asgn vi)
@@ -3525,7 +3534,7 @@ f(x) = yt(x)
(false) ,(length fields)))
(call (core _setsuper!) ,s ,super)
(const (globalref (thismodule) ,name) ,s)
- (call (core _typebody!) ,s (call (core svec) ,@types))
+ (call (core _typebody!) (false) ,s (call (core svec) ,@types))
(return (null)))))))))
(define (type-for-closure name fields super)
@@ -3539,7 +3548,7 @@ f(x) = yt(x)
(false) ,(length fields)))
(call (core _setsuper!) ,s ,super)
(const (globalref (thismodule) ,name) ,s)
- (call (core _typebody!) ,s
+ (call (core _typebody!) (false) ,s
(call (core svec) ,@(map (lambda (v) '(core Box)) fields)))
(return (null)))))))))
@@ -3641,8 +3650,8 @@ f(x) = yt(x)
rhs1))
(ex `(= ,var ,rhs)))
(if (eq? rhs1 rhs0)
- `(block ,ex ,rhs0)
- `(block (= ,rhs1 ,rhs0)
+ `(block (globaldecl ,var) ,ex ,rhs0)
+ `(block (globaldecl ,var) (= ,rhs1 ,rhs0)
,ex
,rhs1))))
@@ -3800,7 +3809,7 @@ f(x) = yt(x)
(Set '(quote top core lineinfo line inert local-def unnecessary copyast
meta inbounds boundscheck loopinfo decl aliasscope popaliasscope
thunk with-static-parameters toplevel-only
- global globalref assign-const-if-global isglobal thismodule
+ global globalref global-if-global assign-const-if-global isglobal thismodule
const atomic null true false ssavalue isdefined toplevel module lambda
error gc_preserve_begin gc_preserve_end import using export public inline noinline purity)))
@@ -4034,7 +4043,7 @@ f(x) = yt(x)
((atom? e) e)
(else
(case (car e)
- ((quote top core globalref thismodule lineinfo line break inert module toplevel null true false meta) e)
+ ((quote top core globalref thismodule lineinfo line break inert module toplevel null true false meta import using) e)
((toplevel-only)
;; hack to avoid generating a (method x) expr for struct types
(if (eq? (cadr e) 'struct)
@@ -4057,7 +4066,10 @@ f(x) = yt(x)
'(null)
`(newvar ,(cadr e))))))
((const)
- (put! globals (binding-to-globalref (cadr e)) #f)
+ ;; Check we've expanded surface `const` (1 argument form)
+ (assert (and (length= e 3)))
+ (when (globalref? (cadr e))
+ (put! globals (cadr e) #f))
e)
((atomic) e)
((isdefined) ;; convert isdefined expr to function for closure converted variables
@@ -4095,6 +4107,7 @@ f(x) = yt(x)
(capt-var-access v fname opaq)
v)))
cvs)))
+ (set-car! (cdddr (lam:vinfo lam2)) '()) ;; must capture static_parameters as values inside opaque_closure
`(new_opaque_closure
,(cadr e) ,(or (caddr e) '(call (core apply_type) (core Union))) ,(or (cadddr e) '(core Any)) ,allow-partial
(opaque_closure_method (null) ,nargs ,isva ,functionloc ,(convert-lambda lam2 (car (lam:args lam2)) #f '() (symbol-to-idx-map cvs) parsed-method-stack))
@@ -4129,8 +4142,9 @@ f(x) = yt(x)
`(toplevel-butfirst
;; wrap in toplevel-butfirst so it gets moved higher along with
;; closure type definitions
+ (unnecessary ,(cadr e))
,e
- (thunk (lambda () (() () 0 ()) (block (return ,e))))))))
+ (latestworld)))))
((null? cvs)
`(block
,@sp-inits
@@ -4364,11 +4378,10 @@ f(x) = yt(x)
(define (linearize e)
(cond ((or (not (pair? e)) (quoted? e)) e)
((eq? (car e) 'lambda)
- (set-car! (cdddr e) (compile-body (cadddr e) (append (car (caddr e))
- (cadr (caddr e)))
- e)))
- (else (for-each linearize (cdr e))))
- e)
+ (list-set e 3 (compile-body (cadddr e)
+ (append (car (caddr e))
+ (cadr (caddr e))) e)))
+ (else (cons (car e) (map linearize (cdr e))))))
(define (valid-ir-argument? e)
(or (simple-atom? e)
@@ -4409,7 +4422,6 @@ f(x) = yt(x)
(first-line #t)
(current-loc #f)
(rett #f)
- (global-const-error #f)
(vinfo-table (vinfo-to-table (car (lam:vinfo lam))))
(arg-map #f) ;; map arguments to new names if they are assigned
(label-counter 0) ;; counter for generating label addresses
@@ -4622,22 +4634,19 @@ f(x) = yt(x)
(cdr cnd)
(list cnd))))))
tests))
- (define (emit-assignment-or-setglobal lhs rhs)
- (if (globalref? lhs)
- (begin
- (emit `(global ,lhs))
- (if (null? (cadr lam))
- (emit `(latestworld)))
- (emit `(call (top setglobal!) ,(cadr lhs) (inert ,(caddr lhs)) ,rhs)))
- (emit `(= ,lhs ,rhs))))
- (define (emit-assignment lhs rhs)
+ (define (emit-assignment-or-setglobal lhs rhs (op '=))
+ ;; (const (globalref _ _) _) does not use setglobal!
+ (if (and (globalref? lhs) (eq? op '=))
+ (emit `(call (top setglobal!) ,(cadr lhs) (inert ,(caddr lhs)) ,rhs))
+ (emit `(,op ,lhs ,rhs))))
+ (define (emit-assignment lhs rhs (op '=))
(if rhs
(if (valid-ir-rvalue? lhs rhs)
- (emit-assignment-or-setglobal lhs rhs)
+ (emit-assignment-or-setglobal lhs rhs op)
(let ((rr (make-ssavalue)))
(emit `(= ,rr ,rhs))
- (emit-assignment-or-setglobal lhs rr)))
- (emit-assignment-or-setglobal lhs `(null))) ; in unreachable code (such as after return), still emit the assignment so that the structure of those uses is preserved
+ (emit-assignment-or-setglobal lhs rr op)))
+ (emit-assignment-or-setglobal lhs `(null) op)) ; in unreachable code (such as after return), still emit the assignment so that the structure of those uses is preserved
#f)
;; the interpreter loop. `break-labels` keeps track of the labels to jump to
;; for all currently closing break-blocks.
@@ -4647,18 +4656,18 @@ f(x) = yt(x)
;; from the current function.
(define (compile e break-labels value tail)
(if (or (not (pair? e)) (memq (car e) '(null true false ssavalue quote inert top core copyast the_exception $
- globalref thismodule cdecl stdcall fastcall thiscall llvmcall)))
+ globalref thismodule cdecl stdcall fastcall thiscall llvmcall static_parameter)))
(let ((e1 (if (and arg-map (symbol? e))
(get arg-map e e)
e)))
- (if (and value (or (underscore-symbol? e)
- (and (pair? e) (eq? (car e) 'globalref)
- (underscore-symbol? (cadr e)))))
+ (if (or (underscore-symbol? e)
+ (and (pair? e) (eq? (car e) 'globalref)
+ (underscore-symbol? (cadr e))))
(error (string "all-underscore identifiers are write-only and their values cannot be used in expressions" (format-loc current-loc))))
(cond (tail (emit-return tail e1))
(value e1)
((symbol? e1) (emit e1) #f) ;; keep symbols for undefined-var checking
- ((and (pair? e1) (eq? (car e1) 'globalref)) (emit e1) #f) ;; keep globals for undefined-var checking
+ ((and (pair? e1) (memq (car e1) '(globalref static_parameter))) (emit e1) #f) ;; keep for undefined-var checking
(else #f)))
(case (car e)
((call new splatnew foreigncall cfunction new_opaque_closure)
@@ -4703,7 +4712,12 @@ f(x) = yt(x)
(cond (tail (emit-return tail callex))
(value callex)
(else (emit callex)))))
- ((=)
+ ((= const)
+ (when (eq? (car e) 'const)
+ (when (local-in? (cadr e) lam)
+ (error (string "unsupported `const` declaration on local variable" (format-loc current-loc))))
+ (when (pair? (cadr lam))
+ (error (string "`global const` declaration not allowed inside function" (format-loc current-loc)))))
(let ((lhs (cadr e)))
(if (and (symbol? lhs) (underscore-symbol? lhs))
(compile (caddr e) break-labels value tail)
@@ -4716,10 +4730,10 @@ f(x) = yt(x)
rhs (make-ssavalue))))
(if (not (eq? rr rhs))
(emit `(= ,rr ,rhs)))
- (emit-assignment-or-setglobal lhs rr)
+ (emit-assignment-or-setglobal lhs rr (car e))
(if tail (emit-return tail rr))
rr)
- (emit-assignment lhs rhs))))))
+ (emit-assignment lhs rhs (car e)))))))
((block)
(let* ((last-fname filename)
(fnm (first-non-meta e))
@@ -4951,24 +4965,17 @@ f(x) = yt(x)
(emit `(latestworld))))
((globaldecl)
(if value (error "misplaced \"global\" declaration"))
- (if (atom? (caddr e)) (begin (emit e) (emit `(latestworld)))
+ (if (or (length= e 2) (atom? (caddr e))) (emit e)
(let ((rr (make-ssavalue)))
(emit `(= ,rr ,(caddr e)))
- (emit `(globaldecl ,(cadr e) ,rr))
- (emit `(latestworld)))))
+ (emit `(globaldecl ,(cadr e) ,rr))))
+ (if (null? (cadr lam))
+ (emit `(latestworld))))
((local-def) #f)
((local) #f)
((moved-local)
(set-car! (lam:vinfo lam) (append (car (lam:vinfo lam)) `((,(cadr e) Any 2))))
#f)
- ((const)
- (if (local-in? (cadr e) lam)
- (error (string "unsupported `const` declaration on local variable" (format-loc current-loc)))
- (if (pair? (cadr lam))
- ;; delay this error to allow "misplaced struct" errors to happen first
- (if (not global-const-error)
- (set! global-const-error current-loc))
- (emit e))))
((atomic) (error "misplaced atomic declaration"))
((isdefined throw_undef_if_not) (if tail (emit-return tail e) e))
((boundscheck) (if tail (emit-return tail e) e))
@@ -5099,8 +5106,6 @@ f(x) = yt(x)
(let ((pexc (pop-exc-expr src-catch-tokens target-catch-tokens)))
(if pexc (set-cdr! point (cons pexc (cdr point)))))))))
handler-goto-fixups)
- (if global-const-error
- (error (string "`global const` declaration not allowed inside function" (format-loc global-const-error))))
(let* ((stmts (reverse! code))
(di (definitely-initialized-vars stmts vi))
(body (cons 'block (filter (lambda (e)
@@ -5172,6 +5177,14 @@ f(x) = yt(x)
(define (set-lineno! lineinfo num)
(set-car! (cddr lineinfo) num))
+;; note that the 'list and 'block atoms make all lists 1-indexed.
+;; returns a 5-element vector containing:
+;; code: `(block ,@(n expressions))
+;; locs: list of line-table index, where code[i] has lineinfo line-table[locs[i]]
+;; line-table: list of `(lineinfo file.jl 123 0)'
+;; ssavalue-table: table of (ssa-num . code-index)
+;; where ssavalue references in `code` need this remapping
+;; label-table: table of (label . code-index)
(define (compact-ir body file line)
(let ((code '(block))
(locs '(list))
@@ -5278,7 +5291,7 @@ f(x) = yt(x)
e)
((ssavalue? e)
(let ((idx (get ssavalue-table (cadr e) #f)))
- (if (not idx) (begin (prn e) (prn lam) (error "ssavalue with no def")))
+ (if (not idx) (error "internal bug: ssavalue with no def"))
`(ssavalue ,idx)))
((eq? (car e) 'goto)
`(goto ,(get label-table (cadr e))))
diff --git a/src/julia.h b/src/julia.h
index a80a69049ccb2..13348fa9b153d 100644
--- a/src/julia.h
+++ b/src/julia.h
@@ -312,6 +312,7 @@ typedef struct _jl_code_info_t {
// various boolean properties:
uint8_t propagate_inbounds;
uint8_t has_fcall;
+ uint8_t has_image_globalref;
uint8_t nospecializeinfer;
uint8_t isva;
// uint8 settings
@@ -330,8 +331,8 @@ typedef struct _jl_method_t {
struct _jl_module_t *module;
jl_sym_t *file;
int32_t line;
+ _Atomic(int32_t) dispatch_status; // bits defined in staticdata.jl
_Atomic(size_t) primary_world;
- _Atomic(size_t) deleted_world;
// method's type signature. redundant with TypeMapEntry->specTypes
jl_value_t *sig;
@@ -373,6 +374,10 @@ typedef struct _jl_method_t {
uint8_t isva;
uint8_t is_for_opaque_closure;
uint8_t nospecializeinfer;
+ // bit flags, 0x01 = scanned
+ // 0x02 = added to module scanned list (either from scanning or inference edge)
+ _Atomic(uint8_t) did_scan_source;
+
// uint8 settings
uint8_t constprop; // 0x00 = use heuristic; 0x01 = aggressive; 0x02 = none
uint8_t max_varargs; // 0xFF = use heuristic; otherwise, max # of args to expand
@@ -399,13 +404,16 @@ struct _jl_method_instance_t {
} def; // pointer back to the context for this code
jl_value_t *specTypes; // argument types this was specialized for
jl_svec_t *sparam_vals; // static parameter values, indexed by def.method->sig
- jl_array_t *backedges; // list of code-instances which call this method-instance; `invoke` records (invokesig, caller) pairs
+ // list of code-instances which call this method-instance; `invoke` records (invokesig, caller) pairs
+ jl_array_t *backedges;
_Atomic(struct _jl_code_instance_t*) cache;
uint8_t cache_with_orig; // !cache_with_specTypes
// flags for this method instance
// bit 0: generated by an explicit `precompile(...)`
// bit 1: dispatched
+ // bit 2: The ->backedges field is currently being walked higher up the stack - entries may be deleted, but not moved
+ // bit 3: The ->backedges field was modified and should be compacted when clearing bit 2
_Atomic(uint8_t) flags;
};
#define JL_MI_FLAGS_MASK_PRECOMPILED 0x01
@@ -445,9 +453,9 @@ typedef struct _jl_code_instance_t {
_Atomic(jl_value_t *) inferred;
_Atomic(jl_debuginfo_t *) debuginfo; // stored information about edges from this object (set once, with a happens-before both source and invoke)
_Atomic(jl_svec_t *) edges; // forward edge info
- //TODO: uint8_t absolute_max; // whether true max world is unknown
// purity results
+ jl_value_t *analysis_results; // Analysis results about this code (IPO-safe)
// see also encode_effects() and decode_effects() in `base/compiler/effects.jl`,
_Atomic(uint32_t) ipo_purity_bits;
// purity_flags:
@@ -459,9 +467,14 @@ typedef struct _jl_code_instance_t {
// uint8_t inaccessiblememonly : 2;
// uint8_t noub : 2;
// uint8_t nonoverlayed : 2;
- jl_value_t *analysis_results; // Analysis results about this code (IPO-safe)
// compilation state cache
+ // these time fields have units of seconds (60 ns minimum resolution and 18 hour maximum saturates to Infinity) and are stored in Float16 format
+ uint16_t time_infer_total; // total cost of computing `inferred` originally
+ uint16_t time_infer_cache_saved; // adjustment to total cost, reflecting how much time was saved by having caches, to give a stable real cost without caches for comparisons
+ uint16_t time_infer_self; // self cost of julia inference for `inferred` (included in time_infer_total)
+ _Atomic(uint16_t) time_compile; // self cost of llvm compilation (e.g. of computing `invoke`)
+ //TODO: uint8_t absolute_max; // whether true max world is unknown
_Atomic(uint8_t) specsigflags; // & 0b001 == specptr is a specialized function signature for specTypes->rettype
// & 0b010 == invokeptr matches specptr
// & 0b100 == From image
@@ -523,6 +536,7 @@ typedef struct {
uint8_t mutabl:1;
uint8_t mayinlinealloc:1;
uint8_t _reserved:5;
+ _Atomic(uint8_t) cache_entry_count; // (approximate counter of TypeMapEntry for heuristics)
uint8_t max_methods; // override for inference's max_methods setting (0 = no additional limit or relaxation)
uint8_t constprop_heustic; // override for inference's constprop heuristic
} jl_typename_t;
@@ -616,92 +630,165 @@ typedef struct _jl_weakref_t {
} jl_weakref_t;
// N.B: Needs to be synced with runtime_internals.jl
+// We track essentially three levels of binding strength:
+//
+// 1. Implicit Bindings (Weakest)
+// These binding kinds depend solely on the set of using'd packages and are not explicitly
+// declared:
+//
+// PARTITION_KIND_IMPLICIT_CONST
+// PARTITION_KIND_IMPLICIT_GLOBAL
+// PARTITION_KIND_GUARD
+// PARTITION_KIND_FAILED
+//
+// 2. Weakly Declared Bindings (Weak)
+// The binding was declared using `global`. It is treated as a mutable, `Any` type global
+// for almost all purposes, except that it receives slightly worse optimizations, since it
+// may be replaced.
+//
+// PARTITION_KIND_DECLARED
+//
+// 3. Strongly Declared Bindings (Strong)
+// All other bindings are explicitly declared using a keyword or global assignment.
+// These are considered strongest:
+//
+// PARTITION_KIND_CONST
+// PARTITION_KIND_CONST_IMPORT
+// PARTITION_KIND_EXPLICIT
+// PARTITION_KIND_IMPORTED
+// PARTITION_KIND_GLOBAL
+// PARTITION_KIND_UNDEF_CONST
+//
+// The runtime supports syntactic invalidation (by raising the world age and changing the partition type
+// in the new world age) from any partition kind to any other.
+//
+// However, not all transitions are allowed syntactically. We have the following rules for SYNTACTIC invalidation:
+// 1. It is always syntactically permissible to replace a weaker binding by a stronger binding
+// 2. Implicit bindings can be syntactically changed to other implicit bindings by changing the `using` set.
+// 3. Finally, we syntactically permit replacing one PARTITION_KIND_CONST(_IMPORT) by another of a different value.
+//
+// We may make this list more permissive in the future.
+//
+// Finally, PARTITION_KIND_BACKDATED_CONST is a special case, and the only case where we may replace an
+// existing partition by a different partition kind in the same world age. As such, it needs special
+// support in inference. Any partition kind that may be replaced by a PARTITION_KIND_BACKDATED_CONST
+// must be inferred accordingly. PARTITION_KIND_BACKDATED_CONST is intended as a temporary compatibility
+// measure. The following kinds may be replaced by PARTITION_KIND_BACKDATED_CONST:
+// - PARTITION_KIND_GUARD
+// - PARTITION_KIND_FAILED
+// - PARTITION_KIND_DECLARED
enum jl_partition_kind {
// Constant: This binding partition is a constant declared using `const _ = ...`
// ->restriction holds the constant value
- BINDING_KIND_CONST = 0x0,
+ PARTITION_KIND_CONST = 0x0,
// Import Constant: This binding partition is a constant declared using `import A`
// ->restriction holds the constant value
- BINDING_KIND_CONST_IMPORT = 0x1,
- // Global: This binding partition is a global variable.
+ PARTITION_KIND_CONST_IMPORT = 0x1,
+ // Global: This binding partition is a global variable. It was declared either using
+ // `global x::T` or implicitly through a syntactic global assignment.
// -> restriction holds the type restriction
- BINDING_KIND_GLOBAL = 0x2,
- // Implicit: The binding was implicitly imported from a `using`'d module.
- // ->restriction holds the imported binding
- BINDING_KIND_IMPLICIT = 0x3,
+ PARTITION_KIND_GLOBAL = 0x2,
+ // Implicit: The binding was a global, implicitly imported from a `using`'d module.
+ // ->restriction holds the ultimately imported global binding
+ PARTITION_KIND_IMPLICIT_GLOBAL = 0x3,
+ // Implicit: The binding was a constant, implicitly imported from a `using`'d module.
+ // ->restriction holds the ultimately imported constant value
+ PARTITION_KIND_IMPLICIT_CONST = 0x4,
// Explicit: The binding was explicitly `using`'d by name
// ->restriction holds the imported binding
- BINDING_KIND_EXPLICIT = 0x4,
+ PARTITION_KIND_EXPLICIT = 0x5,
// Imported: The binding was explicitly `import`'d by name
// ->restriction holds the imported binding
- BINDING_KIND_IMPORTED = 0x5,
+ PARTITION_KIND_IMPORTED = 0x6,
// Failed: We attempted to import the binding, but the import was ambiguous
// ->restriction is NULL.
- BINDING_KIND_FAILED = 0x6,
- // Declared: The binding was declared using `global` or similar
+ PARTITION_KIND_FAILED = 0x7,
+ // Declared: The binding was declared using `global` or similar. This acts in most ways like
+ // PARTITION_KIND_GLOBAL with an `Any` restriction, except that it may be redefined to a stronger
+ // binding like `const` or an explicit import.
// ->restriction is NULL.
- BINDING_KIND_DECLARED = 0x7,
+ PARTITION_KIND_DECLARED = 0x8,
// Guard: The binding was looked at, but no global or import was resolved at the time
// ->restriction is NULL.
- BINDING_KIND_GUARD = 0x8,
+ PARTITION_KIND_GUARD = 0x9,
// Undef Constant: This binding partition is a constant declared using `const`, but
// without a value.
// ->restriction is NULL
- BINDING_KIND_UNDEF_CONST = 0x9,
+ PARTITION_KIND_UNDEF_CONST = 0xa,
// Backated constant. A constant that was backdated for compatibility. In all other
- // ways equivalent to BINDING_KIND_CONST, but prints a warning on access
- BINDING_KIND_BACKDATED_CONST = 0xa,
+ // ways equivalent to PARTITION_KIND_CONST, but prints a warning on access
+ PARTITION_KIND_BACKDATED_CONST = 0xb,
+
+ // This is not a real binding kind, but can be used to ask for a re-resolution
+ // of the implicit binding kind
+ PARTITION_FAKE_KIND_IMPLICIT_RECOMPUTE = 0xc,
+ PARTITION_FAKE_KIND_CYCLE = 0xd
};
-#ifdef _P64
-// Union of a ptr and a 3 bit field.
-typedef uintptr_t jl_ptr_kind_union_t;
+static const uint8_t PARTITION_MASK_KIND = 0x0f;
+static const uint8_t PARTITION_MASK_FLAG = 0xf0;
+
+//// These are flags that get anded into the above
+//
+// _EXPORTED: This binding partition is exported. In the world ranges covered by this partition,
+// other modules that `using` this module may implicitly import this binding.
+static const uint8_t PARTITION_FLAG_EXPORTED = 0x10;
+// _DEPRECATED: This binding partition is deprecated. It is considered weak for the purposes of
+// implicit import resolution.
+static const uint8_t PARTITION_FLAG_DEPRECATED = 0x20;
+// _DEPWARN: This binding partition will print a deprecation warning on access. Note that _DEPWARN
+// implies _DEPRECATED. However, the reverse is not true. Such bindings are usually used for functions,
+// where calling the function itself will provide a (better) deprecation warning/error.
+static const uint8_t PARTITION_FLAG_DEPWARN = 0x40;
+
+#if defined(_COMPILER_MICROSOFT_)
+#define JL_ALIGNED_ATTR(alignment) \
+ __declspec(align(alignment))
#else
-typedef struct __attribute__((aligned(8))) { jl_value_t *val; size_t kind; } jl_ptr_kind_union_t;
+#define JL_ALIGNED_ATTR(alignment) \
+ __attribute__((aligned(alignment)))
#endif
-typedef struct __attribute__((aligned(8))) _jl_binding_partition_t {
+
+typedef struct JL_ALIGNED_ATTR(8) _jl_binding_partition_t {
JL_DATA_TYPE
/* union {
- * // For ->kind == BINDING_KIND_GLOBAL
+ * // For ->kind == PARTITION_KIND_GLOBAL
* jl_value_t *type_restriction;
- * // For ->kind == BINDING_KIND_CONST(_IMPORT)
+ * // For ->kind in (PARTITION_KIND_CONST(_IMPORT), PARTITION_KIND_IMPLICIT_CONST)
* jl_value_t *constval;
- * // For ->kind in (BINDING_KIND_IMPLICIT, BINDING_KIND_EXPLICIT, BINDING_KIND_IMPORT)
+ * // For ->kind in (PARTITION_KIND_IMPLICIT_GLOBAL, PARTITION_KIND_EXPLICIT, PARTITION_KIND_IMPORT)
* jl_binding_t *imported;
* } restriction;
- *
- * Currently: Low 3 bits hold ->kind on _P64 to avoid needing >8 byte atomics
- *
- * This field is updated atomically with both kind and restriction. The following
- * transitions are allowed and modeled by the system:
- *
- * GUARD -> any
- * (DECLARED, FAILED) -> any non-GUARD
- * IMPLICIT -> {EXPLICIT, IMPORTED} (->restriction unchanged only)
- *
- * In addition, we permit (with warning about undefined behavior) changing the restriction
- * pointer for CONST(_IMPORT).
- *
- * All other kind or restriction transitions are disallowed.
*/
- _Atomic(jl_ptr_kind_union_t) restriction;
- size_t min_world;
+ jl_value_t *restriction;
+ _Atomic(size_t) min_world;
_Atomic(size_t) max_world;
_Atomic(struct _jl_binding_partition_t *) next;
- size_t reserved; // Reserved for ->kind. Currently this holds the low bits of ->restriction during serialization
+ size_t kind;
} jl_binding_partition_t;
+STATIC_INLINE enum jl_partition_kind jl_binding_kind(jl_binding_partition_t *bpart) JL_NOTSAFEPOINT
+{
+ return (enum jl_partition_kind)(bpart->kind & 0xf);
+}
+
+enum jl_binding_flags {
+ BINDING_FLAG_DID_PRINT_BACKDATE_ADMONITION = 0x1,
+ BINDING_FLAG_DID_PRINT_IMPLICIT_IMPORT_ADMONITION = 0x2,
+ // `export` is tracked in partitions, but sets this as well
+ BINDING_FLAG_PUBLICP = 0x4,
+ // Set if any methods defined in this module implicitly reference
+ // this binding. If not, invalidation is optimized.
+ BINDING_FLAG_ANY_IMPLICIT_EDGES = 0x8
+};
+
typedef struct _jl_binding_t {
JL_DATA_TYPE
jl_globalref_t *globalref; // cached GlobalRef for this binding
_Atomic(jl_value_t*) value;
_Atomic(jl_binding_partition_t*) partitions;
jl_array_t *backedges;
- uint8_t did_print_backdate_admonition:1;
- uint8_t exportp:1; // `public foo` sets `publicp`, `export foo` sets both `publicp` and `exportp`
- uint8_t publicp:1; // exportp without publicp is not allowed.
- uint8_t deprecated:2; // 0=not deprecated, 1=renamed, 2=moved to another package
- uint8_t padding:3;
+ _Atomic(uint8_t) flags;
} jl_binding_t;
typedef struct {
@@ -717,6 +804,8 @@ typedef struct _jl_module_t {
_Atomic(jl_genericmemory_t*) bindingkeyset; // index lookup by name into bindings
jl_sym_t *file;
int32_t line;
+ jl_value_t *usings_backedges;
+ jl_value_t *scanned_methods;
// hidden fields:
arraylist_t usings; /* arraylist of struct jl_module_using */ // modules with all bindings potentially imported
jl_uuid_t build_id;
@@ -728,6 +817,8 @@ typedef struct _jl_module_t {
int8_t infer;
uint8_t istopmod;
int8_t max_methods;
+ // If cleared, no binding partition in this module has PARTITION_FLAG_EXPORTED and min_world > jl_require_world.
+ _Atomic(int8_t) export_set_changed_since_require_world;
jl_mutex_t lock;
intptr_t hash;
} jl_module_t;
@@ -943,6 +1034,7 @@ extern JL_DLLIMPORT jl_datatype_t *jl_undefvarerror_type JL_GLOBALLY_ROOTED;
extern JL_DLLIMPORT jl_datatype_t *jl_fielderror_type JL_GLOBALLY_ROOTED;
extern JL_DLLIMPORT jl_datatype_t *jl_atomicerror_type JL_GLOBALLY_ROOTED;
extern JL_DLLIMPORT jl_datatype_t *jl_missingcodeerror_type JL_GLOBALLY_ROOTED;
+extern JL_DLLIMPORT jl_datatype_t *jl_trimfailure_type JL_GLOBALLY_ROOTED;
extern JL_DLLIMPORT jl_datatype_t *jl_lineinfonode_type JL_GLOBALLY_ROOTED;
extern JL_DLLIMPORT jl_datatype_t *jl_abioverride_type JL_GLOBALLY_ROOTED;
extern JL_DLLIMPORT jl_value_t *jl_stackovf_exception JL_GLOBALLY_ROOTED;
@@ -1414,7 +1506,7 @@ JL_DLLEXPORT jl_value_t *jl_unwrap_unionall(jl_value_t *v JL_PROPAGATES_ROOT) JL
#define jl_inlinedatatype_layout(t) (((jl_datatype_t*)t)->layout)
STATIC_INLINE const jl_datatype_layout_t *jl_datatype_layout(jl_datatype_t *t) JL_NOTSAFEPOINT
{
- if (jl_is_layout_opaque(t->layout)) // e.g. GenericMemory
+ if (t->layout == NULL || jl_is_layout_opaque(t->layout)) // e.g. GenericMemory
t = (jl_datatype_t*)jl_unwrap_unionall(t->name->wrapper);
return t->layout;
}
@@ -1431,6 +1523,13 @@ STATIC_INLINE char *jl_symbol_name_(jl_sym_t *s) JL_NOTSAFEPOINT
}
#define jl_symbol_name(s) jl_symbol_name_(s)
+STATIC_INLINE const char *jl_module_debug_name(jl_module_t *mod) JL_NOTSAFEPOINT
+{
+ if (!mod)
+ return "";
+ return jl_symbol_name(mod->name);
+}
+
static inline uint32_t jl_fielddesc_size(int8_t fielddesc_type) JL_NOTSAFEPOINT
{
assert(fielddesc_type >= 0 && fielddesc_type <= 2);
@@ -1532,7 +1631,7 @@ static inline int jl_field_isconst(jl_datatype_t *st, int i) JL_NOTSAFEPOINT
#define jl_is_mutable(t) (((jl_datatype_t*)t)->name->mutabl)
#define jl_is_mutable_datatype(t) (jl_is_datatype(t) && (((jl_datatype_t*)t)->name->mutabl))
#define jl_is_immutable(t) (!((jl_datatype_t*)t)->name->mutabl)
-#define jl_is_immutable_datatype(t) (jl_is_datatype(t) && (!((jl_datatype_t*)t)->name->mutabl))
+#define jl_may_be_immutable_datatype(t) (jl_is_datatype(t) && (!((jl_datatype_t*)t)->name->mutabl))
#define jl_is_uniontype(v) jl_typetagis(v,jl_uniontype_tag<<4)
#define jl_is_typevar(v) jl_typetagis(v,jl_tvar_tag<<4)
#define jl_is_unionall(v) jl_typetagis(v,jl_unionall_tag<<4)
@@ -1566,6 +1665,7 @@ static inline int jl_field_isconst(jl_datatype_t *st, int i) JL_NOTSAFEPOINT
#define jl_is_quotenode(v) jl_typetagis(v,jl_quotenode_type)
#define jl_is_newvarnode(v) jl_typetagis(v,jl_newvarnode_type)
#define jl_is_linenode(v) jl_typetagis(v,jl_linenumbernode_type)
+#define jl_is_linenumbernode(v) jl_typetagis(v,jl_linenumbernode_type)
#define jl_is_method_instance(v) jl_typetagis(v,jl_method_instance_type)
#define jl_is_code_instance(v) jl_typetagis(v,jl_code_instance_type)
#define jl_is_code_info(v) jl_typetagis(v,jl_code_info_type)
@@ -1843,6 +1943,7 @@ JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void);
JL_DLLEXPORT jl_svec_t *jl_svec(size_t n, ...) JL_MAYBE_UNROOTED;
JL_DLLEXPORT jl_svec_t *jl_svec1(void *a);
JL_DLLEXPORT jl_svec_t *jl_svec2(void *a, void *b);
+JL_DLLEXPORT jl_svec_t *jl_svec3(void *a, void *b, void *c);
JL_DLLEXPORT jl_svec_t *jl_alloc_svec(size_t n);
JL_DLLEXPORT jl_svec_t *jl_alloc_svec_uninit(size_t n);
JL_DLLEXPORT jl_svec_t *jl_svec_copy(jl_svec_t *a);
@@ -1854,10 +1955,11 @@ JL_DLLEXPORT jl_sym_t *jl_gensym(void);
JL_DLLEXPORT jl_sym_t *jl_tagged_gensym(const char *str, size_t len);
JL_DLLEXPORT jl_sym_t *jl_get_root_symbol(void);
JL_DLLEXPORT jl_value_t *jl_get_binding_value(jl_binding_t *b JL_PROPAGATES_ROOT);
+JL_DLLEXPORT jl_value_t *jl_get_binding_value_in_world(jl_binding_t *b JL_PROPAGATES_ROOT, size_t world);
JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_const(jl_binding_t *b JL_PROPAGATES_ROOT);
-JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved(jl_binding_t *b JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT;
-JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved_and_const(jl_binding_t *b JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT;
-JL_DLLEXPORT jl_value_t *jl_declare_const_gf(jl_binding_t *b, jl_module_t *mod, jl_sym_t *name);
+JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved_debug_only(jl_binding_t *b JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT;
+JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_latest_resolved_and_const_debug_only(jl_binding_t *b JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT;
+JL_DLLEXPORT jl_value_t *jl_declare_const_gf(jl_module_t *mod, jl_sym_t *name);
JL_DLLEXPORT jl_method_t *jl_method_def(jl_svec_t *argdata, jl_methtable_t *mt, jl_code_info_t *f, jl_module_t *module);
JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *linfo, size_t world, jl_code_instance_t **cache);
JL_DLLEXPORT jl_code_info_t *jl_copy_code_info(jl_code_info_t *src);
@@ -1996,24 +2098,26 @@ JL_DLLEXPORT void jl_set_module_infer(jl_module_t *self, int value);
JL_DLLEXPORT int jl_get_module_infer(jl_module_t *m);
JL_DLLEXPORT void jl_set_module_max_methods(jl_module_t *self, int value);
JL_DLLEXPORT int jl_get_module_max_methods(jl_module_t *m);
+JL_DLLEXPORT jl_value_t *jl_get_module_usings_backedges(jl_module_t *m);
+JL_DLLEXPORT jl_value_t *jl_get_module_scanned_methods(jl_module_t *m);
+JL_DLLEXPORT jl_value_t *jl_get_module_binding_or_nothing(jl_module_t *m, jl_sym_t *s);
+
// get binding for reading
JL_DLLEXPORT jl_binding_t *jl_get_binding(jl_module_t *m JL_PROPAGATES_ROOT, jl_sym_t *var);
-JL_DLLEXPORT jl_binding_t *jl_get_binding_or_error(jl_module_t *m, jl_sym_t *var);
JL_DLLEXPORT jl_value_t *jl_module_globalref(jl_module_t *m, jl_sym_t *var);
JL_DLLEXPORT jl_value_t *jl_get_binding_type(jl_module_t *m, jl_sym_t *var);
// get binding for assignment
JL_DLLEXPORT void jl_check_binding_currently_writable(jl_binding_t *b, jl_module_t *m, jl_sym_t *s);
JL_DLLEXPORT jl_binding_t *jl_get_binding_wr(jl_module_t *m JL_PROPAGATES_ROOT, jl_sym_t *var);
-JL_DLLEXPORT jl_binding_t *jl_get_binding_for_method_def(jl_module_t *m JL_PROPAGATES_ROOT, jl_sym_t *var);
+JL_DLLEXPORT jl_value_t *jl_get_existing_strong_gf(jl_binding_t *b JL_PROPAGATES_ROOT, size_t new_world);
JL_DLLEXPORT int jl_boundp(jl_module_t *m, jl_sym_t *var, int allow_import);
-JL_DLLEXPORT int jl_defines_or_exports_p(jl_module_t *m, jl_sym_t *var);
-JL_DLLEXPORT int jl_binding_resolved_p(jl_module_t *m, jl_sym_t *var);
JL_DLLEXPORT int jl_is_const(jl_module_t *m, jl_sym_t *var);
JL_DLLEXPORT int jl_globalref_is_const(jl_globalref_t *gr);
JL_DLLEXPORT jl_value_t *jl_get_globalref_value(jl_globalref_t *gr);
JL_DLLEXPORT jl_value_t *jl_get_global(jl_module_t *m JL_PROPAGATES_ROOT, jl_sym_t *var);
JL_DLLEXPORT void jl_set_global(jl_module_t *m JL_ROOTING_ARGUMENT, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT);
JL_DLLEXPORT void jl_set_const(jl_module_t *m JL_ROOTING_ARGUMENT, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT);
+void jl_set_initial_const(jl_module_t *m JL_ROOTING_ARGUMENT, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT, int exported);
JL_DLLEXPORT void jl_checked_assignment(jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *rhs JL_MAYBE_UNROOTED);
JL_DLLEXPORT jl_value_t *jl_checked_swap(jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *rhs JL_MAYBE_UNROOTED);
JL_DLLEXPORT jl_value_t *jl_checked_replace(jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *expected, jl_value_t *rhs);
@@ -2022,14 +2126,13 @@ JL_DLLEXPORT jl_value_t *jl_checked_assignonce(jl_binding_t *b, jl_module_t *mod
JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val(jl_binding_t *b JL_ROOTING_ARGUMENT, jl_module_t *mod, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED);
JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val2(jl_binding_t *b JL_ROOTING_ARGUMENT, jl_module_t *mod, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED, enum jl_partition_kind);
JL_DLLEXPORT void jl_module_using(jl_module_t *to, jl_module_t *from);
-JL_DLLEXPORT void jl_module_use(jl_module_t *to, jl_module_t *from, jl_sym_t *s);
-JL_DLLEXPORT void jl_module_use_as(jl_module_t *to, jl_module_t *from, jl_sym_t *s, jl_sym_t *asname);
-JL_DLLEXPORT void jl_module_import(jl_module_t *to, jl_module_t *from, jl_sym_t *s);
-JL_DLLEXPORT void jl_module_import_as(jl_module_t *to, jl_module_t *from, jl_sym_t *s, jl_sym_t *asname);
-JL_DLLEXPORT void jl_module_public(jl_module_t *from, jl_sym_t *s, int exported);
+JL_DLLEXPORT void jl_module_use(jl_task_t *ct, jl_module_t *to, jl_module_t *from, jl_sym_t *s);
+JL_DLLEXPORT void jl_module_use_as(jl_task_t *ct, jl_module_t *to, jl_module_t *from, jl_sym_t *s, jl_sym_t *asname);
+JL_DLLEXPORT void jl_module_import(jl_task_t *ct, jl_module_t *to, jl_module_t *from, jl_sym_t *s);
+JL_DLLEXPORT void jl_module_import_as(jl_task_t *ct, jl_module_t *to, jl_module_t *from, jl_sym_t *s, jl_sym_t *asname);
+int jl_module_public_(jl_module_t *from, jl_sym_t *s, int exported, size_t new_world);
JL_DLLEXPORT int jl_is_imported(jl_module_t *m, jl_sym_t *s);
JL_DLLEXPORT int jl_module_exports_p(jl_module_t *m, jl_sym_t *var);
-JL_DLLEXPORT void jl_add_standard_imports(jl_module_t *m);
// eq hash tables
JL_DLLEXPORT jl_genericmemory_t *jl_eqtable_put(jl_genericmemory_t *h JL_ROOTING_ARGUMENT, jl_value_t *key, jl_value_t *val JL_ROOTED_ARGUMENT, int *inserted);
@@ -2117,11 +2220,30 @@ typedef enum {
JL_IMAGE_IN_MEMORY = 2
} JL_IMAGE_SEARCH;
+typedef enum {
+ JL_IMAGE_KIND_NONE = 0,
+ JL_IMAGE_KIND_JI,
+ JL_IMAGE_KIND_SO,
+} jl_image_kind_t;
+
+// A loaded, but unparsed .ji or .so image file
+typedef struct {
+ jl_image_kind_t kind;
+ void *handle;
+ const void *pointers; // jl_image_pointers_t *
+ const char *data;
+ size_t size;
+ uint64_t base;
+} jl_image_buf_t;
+
+struct _jl_image_t;
+typedef struct _jl_image_t jl_image_t;
+
JL_DLLIMPORT const char *jl_get_libdir(void);
-JL_DLLEXPORT void julia_init(JL_IMAGE_SEARCH rel);
JL_DLLEXPORT void jl_init(void);
-JL_DLLEXPORT void jl_init_with_image(const char *julia_bindir,
- const char *image_path);
+JL_DLLEXPORT void jl_init_with_image_file(const char *julia_bindir,
+ const char *image_path);
+JL_DLLEXPORT void jl_init_with_image_handle(void *handle);
JL_DLLEXPORT const char *jl_get_default_sysimg_path(void);
JL_DLLEXPORT int jl_is_initialized(void);
JL_DLLEXPORT void jl_atexit_hook(int status);
@@ -2133,11 +2255,10 @@ JL_DLLEXPORT const char *jl_pathname_for_handle(void *handle);
JL_DLLEXPORT jl_gcframe_t **jl_adopt_thread(void);
JL_DLLEXPORT int jl_deserialize_verify_header(ios_t *s);
-JL_DLLEXPORT void jl_preload_sysimg_so(const char *fname);
-JL_DLLEXPORT void jl_set_sysimg_so(void *handle);
+JL_DLLEXPORT jl_image_buf_t jl_preload_sysimg(const char *fname);
+JL_DLLEXPORT jl_image_buf_t jl_set_sysimg_so(void *handle);
JL_DLLEXPORT void jl_create_system_image(void **, jl_array_t *worklist, bool_t emit_split, ios_t **s, ios_t **z, jl_array_t **udeps, int64_t *srctextpos);
-JL_DLLEXPORT void jl_restore_system_image(const char *fname);
-JL_DLLEXPORT void jl_restore_system_image_data(const char *buf, size_t len);
+JL_DLLEXPORT void jl_restore_system_image(jl_image_t *image, jl_image_buf_t buf);
JL_DLLEXPORT jl_value_t *jl_restore_incremental(const char *fname, jl_array_t *depmods, int complete, const char *pkgimage);
JL_DLLEXPORT jl_value_t *jl_object_top_module(jl_value_t* v) JL_NOTSAFEPOINT;
@@ -2209,6 +2330,7 @@ JL_DLLEXPORT jl_value_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code);
JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t *metadata, jl_value_t *data);
JL_DLLEXPORT uint8_t jl_ir_flag_inlining(jl_value_t *data) JL_NOTSAFEPOINT;
JL_DLLEXPORT uint8_t jl_ir_flag_has_fcall(jl_value_t *data) JL_NOTSAFEPOINT;
+JL_DLLEXPORT uint8_t jl_ir_flag_has_image_globalref(jl_value_t *data) JL_NOTSAFEPOINT;
JL_DLLEXPORT uint16_t jl_ir_inlining_cost(jl_value_t *data) JL_NOTSAFEPOINT;
JL_DLLEXPORT ssize_t jl_ir_nslots(jl_value_t *data) JL_NOTSAFEPOINT;
JL_DLLEXPORT uint8_t jl_ir_slotflag(jl_value_t *data, size_t i) JL_NOTSAFEPOINT;
@@ -2253,6 +2375,9 @@ JL_DLLEXPORT jl_value_t *jl_call1(jl_function_t *f JL_MAYBE_UNROOTED, jl_value_t
JL_DLLEXPORT jl_value_t *jl_call2(jl_function_t *f JL_MAYBE_UNROOTED, jl_value_t *a JL_MAYBE_UNROOTED, jl_value_t *b JL_MAYBE_UNROOTED);
JL_DLLEXPORT jl_value_t *jl_call3(jl_function_t *f JL_MAYBE_UNROOTED, jl_value_t *a JL_MAYBE_UNROOTED,
jl_value_t *b JL_MAYBE_UNROOTED, jl_value_t *c JL_MAYBE_UNROOTED);
+JL_DLLEXPORT jl_value_t *jl_call4(jl_function_t *f JL_MAYBE_UNROOTED, jl_value_t *a JL_MAYBE_UNROOTED,
+ jl_value_t *b JL_MAYBE_UNROOTED, jl_value_t *c JL_MAYBE_UNROOTED,
+ jl_value_t *d JL_MAYBE_UNROOTED);
// async signal handling ------------------------------------------------------
@@ -2624,7 +2749,7 @@ typedef struct {
int gcstack_arg; // Pass the ptls value as an argument with swiftself
int use_jlplt; // Whether to use the Julia PLT mechanism or emit symbols directly
- int trim; // can we emit dynamic dispatches?
+ int force_emit_all; // Force emission of code for const return functions
} jl_cgparams_t;
extern JL_DLLEXPORT int jl_default_debug_info_kind;
extern JL_DLLEXPORT jl_cgparams_t jl_default_cgparams;
diff --git a/src/julia_internal.h b/src/julia_internal.h
index 9817c8cc8263b..cb28c21c9171e 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -3,6 +3,7 @@
#ifndef JL_INTERNAL_H
#define JL_INTERNAL_H
+#include "dtypes.h"
#include "options.h"
#include "julia_assert.h"
#include "julia_locks.h"
@@ -192,6 +193,9 @@ void JL_UV_LOCK(void);
extern _Atomic(unsigned) _threadedregion;
extern _Atomic(uint16_t) io_loop_tid;
+JL_DLLEXPORT void jl_init_(jl_image_buf_t sysimage);
+JL_DLLEXPORT void jl_enter_threaded_region(void);
+JL_DLLEXPORT void jl_exit_threaded_region(void);
int jl_running_under_rr(int recheck) JL_NOTSAFEPOINT;
//--------------------------------------------------
@@ -224,6 +228,7 @@ extern volatile size_t profile_bt_size_max;
extern volatile size_t profile_bt_size_cur;
extern volatile int profile_running;
extern volatile int profile_all_tasks;
+extern int heartbeat_tid; // Mostly used to ensure we skip this thread in the CPU profiler. XXX: not implemented on Windows
// Ensures that we can safely read the `live_tasks`field of every TLS when profiling.
// We want to avoid the case that a GC gets interleaved with `jl_profile_task` and shrinks
// the `live_tasks` array while we are reading it or frees tasks that are being profiled.
@@ -239,6 +244,7 @@ extern uv_mutex_t bt_data_prof_lock;
#define PROFILE_STATE_THREAD_NOT_SLEEPING (1)
#define PROFILE_STATE_THREAD_SLEEPING (2)
#define PROFILE_STATE_WALL_TIME_PROFILING (3)
+extern _Atomic(int) n_threads_running;
void jl_profile_task(void);
// number of cycles since power-on
@@ -650,6 +656,7 @@ STATIC_INLINE jl_value_t *undefref_check(jl_datatype_t *dt, jl_value_t *v) JL_NO
typedef struct {
uint16_t propagate_inbounds:1;
uint16_t has_fcall:1;
+ uint16_t has_image_globalref:1;
uint16_t nospecializeinfer:1;
uint16_t isva:1;
uint16_t nargsmatchesmethod:1;
@@ -669,6 +676,10 @@ typedef union {
#define SOURCE_MODE_NOT_REQUIRED 0x0
#define SOURCE_MODE_ABI 0x1
+#define METHOD_SIG_LATEST_WHICH 0b0001
+#define METHOD_SIG_LATEST_ONLY 0b0010
+#define METHOD_SIG_PRECOMPILE_MANY 0b0100
+
JL_DLLEXPORT jl_code_instance_t *jl_engine_reserve(jl_method_instance_t *m, jl_value_t *owner);
JL_DLLEXPORT void jl_engine_fulfill(jl_code_instance_t *ci, jl_code_info_t *src);
void jl_engine_sweep(jl_ptls_t *gc_all_tls_states) JL_NOTSAFEPOINT;
@@ -684,6 +695,7 @@ JL_DLLEXPORT jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROP
JL_DLLEXPORT void jl_read_codeinst_invoke(jl_code_instance_t *ci, uint8_t *specsigflags, jl_callptr_t *invoke, void **specptr, int waitcompile);
JL_DLLEXPORT jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *match, size_t world, size_t min_valid, size_t max_valid, int mt_cache);
JL_DLLEXPORT void jl_add_codeinst_to_jit(jl_code_instance_t *codeinst, jl_code_info_t *src);
+JL_DLLEXPORT void jl_add_codeinst_to_cache(jl_code_instance_t *codeinst, jl_code_info_t *src);
JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst_uninit(jl_method_instance_t *mi, jl_value_t *owner);
JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst(
@@ -693,7 +705,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst(
int32_t const_flags, size_t min_world, size_t max_world,
uint32_t effects, jl_value_t *analysis_results,
jl_debuginfo_t *di, jl_svec_t *edges /* , int absolute_max*/);
-JL_DLLEXPORT jl_code_instance_t *jl_get_ci_equiv(jl_code_instance_t *ci JL_PROPAGATES_ROOT, int compiled) JL_NOTSAFEPOINT;
+JL_DLLEXPORT jl_code_instance_t *jl_get_ci_equiv(jl_code_instance_t *ci JL_PROPAGATES_ROOT, size_t target_world) JL_NOTSAFEPOINT;
STATIC_INLINE jl_method_instance_t *jl_get_ci_mi(jl_code_instance_t *ci JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT
{
@@ -721,12 +733,32 @@ jl_code_info_t *jl_new_code_info_from_ir(jl_expr_t *ast);
JL_DLLEXPORT jl_code_info_t *jl_new_code_info_uninit(void);
JL_DLLEXPORT void jl_resolve_definition_effects_in_ir(jl_array_t *stmts, jl_module_t *m, jl_svec_t *sparam_vals, jl_value_t *binding_edge,
int binding_effects);
-JL_DLLEXPORT void jl_maybe_add_binding_backedge(jl_globalref_t *gr, jl_module_t *defining_module, jl_value_t *edge);
+JL_DLLEXPORT int jl_maybe_add_binding_backedge(jl_binding_t *b, jl_value_t *edge, jl_method_t *in_method);
JL_DLLEXPORT void jl_add_binding_backedge(jl_binding_t *b, jl_value_t *edge);
+static const uint8_t MI_FLAG_BACKEDGES_INUSE = 0b0100;
+static const uint8_t MI_FLAG_BACKEDGES_DIRTY = 0b1000;
+static const uint8_t MI_FLAG_BACKEDGES_ALL = 0b1100;
+
+STATIC_INLINE jl_array_t *jl_mi_get_backedges_mutate(jl_method_instance_t *mi JL_PROPAGATES_ROOT, uint8_t *flags) {
+ *flags = jl_atomic_load_relaxed(&mi->flags) & (MI_FLAG_BACKEDGES_ALL);
+ jl_array_t *ret = mi->backedges;
+ if (ret)
+ jl_atomic_fetch_or_relaxed(&mi->flags, MI_FLAG_BACKEDGES_INUSE);
+ return ret;
+}
+
+STATIC_INLINE jl_array_t *jl_mi_get_backedges(jl_method_instance_t *mi JL_PROPAGATES_ROOT) {
+ assert(!(jl_atomic_load_relaxed(&mi->flags) & MI_FLAG_BACKEDGES_ALL));
+ jl_array_t *ret = mi->backedges;
+ return ret;
+}
+
int get_next_edge(jl_array_t *list, int i, jl_value_t** invokesig, jl_code_instance_t **caller) JL_NOTSAFEPOINT;
int set_next_edge(jl_array_t *list, int i, jl_value_t *invokesig, jl_code_instance_t *caller);
+int clear_next_edge(jl_array_t *list, int i, jl_value_t *invokesig, jl_code_instance_t *caller);
void push_edge(jl_array_t *list, jl_value_t *invokesig, jl_code_instance_t *caller);
+void jl_mi_done_backedges(jl_method_instance_t *mi JL_PROPAGATES_ROOT, uint8_t old_flags);
JL_DLLEXPORT void jl_add_method_root(jl_method_t *m, jl_module_t *mod, jl_value_t* root);
void jl_append_method_roots(jl_method_t *m, uint64_t modid, jl_array_t* roots);
@@ -753,6 +785,32 @@ JL_CALLABLE(jl_f_opaque_closure_call);
void jl_install_default_signal_handlers(void);
void restore_signals(void);
void jl_install_thread_signal_handler(jl_ptls_t ptls);
+extern const size_t sig_stack_size;
+STATIC_INLINE int is_addr_on_sigstack(jl_ptls_t ptls, void *ptr)
+{
+ // One guard page for signal_stack.
+ return !((char*)ptr < (char*)ptls->signal_stack - jl_page_size ||
+ (char*)ptr > (char*)ptls->signal_stack + sig_stack_size);
+}
+STATIC_INLINE int jl_inside_signal_handler(void)
+{
+#if (defined(_OS_LINUX_) && defined(_CPU_X86_64_)) || (defined(_OS_DARWIN_) && defined(_CPU_AARCH64_))
+ // Read the stack pointer
+ size_t sp;
+#if defined(_OS_LINUX_) && defined(_CPU_X86_64_)
+ __asm__ __volatile__("movq %%rsp, %0" : "=r"(sp));
+#elif defined(_OS_DARWIN_) && defined(_CPU_AARCH64_)
+ __asm__ __volatile__("mov %0, sp" : "=r"(sp));
+#endif
+ // Check if the stack pointer is within the signal stack
+ jl_ptls_t ptls = jl_current_task->ptls;
+ return is_addr_on_sigstack(ptls, (void*)sp);
+#else
+ return 0;
+#endif
+}
+// File descriptor used for async-signal-safe logging during signal handling
+extern int jl_sig_fd;
JL_DLLEXPORT jl_fptr_args_t jl_get_builtin_fptr(jl_datatype_t *dt);
@@ -843,12 +901,18 @@ int jl_type_equality_is_identity(jl_value_t *t1, jl_value_t *t2) JL_NOTSAFEPOINT
JL_DLLEXPORT void jl_eval_const_decl(jl_module_t *m, jl_value_t *arg, jl_value_t *val);
void jl_binding_set_type(jl_binding_t *b, jl_module_t *mod, jl_sym_t *sym, jl_value_t *ty);
void jl_eval_global_expr(jl_module_t *m, jl_expr_t *ex, int set_type);
-JL_DLLEXPORT void jl_declare_global(jl_module_t *m, jl_value_t *arg, jl_value_t *set_type);
+JL_DLLEXPORT void jl_declare_global(jl_module_t *m, jl_value_t *arg, jl_value_t *set_type, int strong);
JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val3(jl_binding_t *b JL_ROOTING_ARGUMENT, jl_module_t *mod, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED, enum jl_partition_kind, size_t new_world) JL_GLOBALLY_ROOTED;
JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *m, jl_value_t *e, int fast, int expanded, const char **toplevel_filename, int *toplevel_lineno);
+void jl_module_initial_using(jl_module_t *to, jl_module_t *from);
STATIC_INLINE struct _jl_module_using *module_usings_getidx(jl_module_t *m JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT;
STATIC_INLINE jl_module_t *module_usings_getmod(jl_module_t *m JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT;
+void jl_add_usings_backedge(jl_module_t *from, jl_module_t *to);
+typedef struct _modstack_t {
+ jl_binding_t *b;
+ struct _modstack_t *prev;
+} modstack_t;
#ifndef __clang_gcanalyzer__
// The analyzer doesn't like looking through the arraylist, so just model the
@@ -869,6 +933,8 @@ STATIC_INLINE size_t module_usings_max(jl_module_t *m) JL_NOTSAFEPOINT {
return m->usings.max/3;
}
+JL_DLLEXPORT jl_sym_t *jl_module_name(jl_module_t *m) JL_NOTSAFEPOINT;
+void jl_add_scanned_method(jl_module_t *m, jl_method_t *meth);
jl_value_t *jl_eval_global_var(jl_module_t *m JL_PROPAGATES_ROOT, jl_sym_t *e);
jl_value_t *jl_interpret_opaque_closure(jl_opaque_closure_t *clos, jl_value_t **args, size_t nargs);
jl_value_t *jl_interpret_toplevel_thunk(jl_module_t *m, jl_code_info_t *src);
@@ -904,13 +970,19 @@ JL_DLLEXPORT jl_value_t *jl_nth_slot_type(jl_value_t *sig JL_PROPAGATES_ROOT, si
void jl_compute_field_offsets(jl_datatype_t *st);
void jl_module_run_initializer(jl_module_t *m);
JL_DLLEXPORT jl_binding_t *jl_get_module_binding(jl_module_t *m JL_PROPAGATES_ROOT, jl_sym_t *var, int alloc);
-JL_DLLEXPORT void jl_binding_deprecation_warning(jl_module_t *m, jl_sym_t *sym, jl_binding_t *b);
+JL_DLLEXPORT void jl_binding_deprecation_warning(jl_binding_t *b);
+JL_DLLEXPORT jl_binding_partition_t *jl_replace_binding_locked(jl_binding_t *b JL_PROPAGATES_ROOT,
+ jl_binding_partition_t *old_bpart, jl_value_t *restriction_val, enum jl_partition_kind kind, size_t new_world) JL_GLOBALLY_ROOTED;
+JL_DLLEXPORT jl_binding_partition_t *jl_replace_binding_locked2(jl_binding_t *b JL_PROPAGATES_ROOT,
+ jl_binding_partition_t *old_bpart, jl_value_t *restriction_val, size_t kind, size_t new_world) JL_GLOBALLY_ROOTED;
+JL_DLLEXPORT void jl_update_loaded_bpart(jl_binding_t *b, jl_binding_partition_t *bpart);
extern jl_array_t *jl_module_init_order JL_GLOBALLY_ROOTED;
extern htable_t jl_current_modules JL_GLOBALLY_ROOTED;
-extern JL_DLLEXPORT jl_module_t *jl_precompile_toplevel_module JL_GLOBALLY_ROOTED;
+extern jl_module_t *jl_precompile_toplevel_module JL_GLOBALLY_ROOTED;
extern jl_genericmemory_t *jl_global_roots_list JL_GLOBALLY_ROOTED;
extern jl_genericmemory_t *jl_global_roots_keyset JL_GLOBALLY_ROOTED;
extern arraylist_t *jl_entrypoint_mis;
+JL_DLLEXPORT extern size_t jl_require_world;
JL_DLLEXPORT int jl_is_globally_rooted(jl_value_t *val JL_MAYBE_UNROOTED) JL_NOTSAFEPOINT;
JL_DLLEXPORT jl_value_t *jl_as_global_root(jl_value_t *val, int insert) JL_GLOBALLY_ROOTED;
extern jl_svec_t *precompile_field_replace JL_GLOBALLY_ROOTED;
@@ -922,114 +994,126 @@ jl_method_t *jl_make_opaque_closure_method(jl_module_t *module, jl_value_t *name
int nargs, jl_value_t *functionloc, jl_code_info_t *ci, int isva, int isinferred);
JL_DLLEXPORT int jl_is_valid_oc_argtype(jl_tupletype_t *argt, jl_method_t *source);
-EXTERN_INLINE_DECLARE enum jl_partition_kind decode_restriction_kind(jl_ptr_kind_union_t pku) JL_NOTSAFEPOINT
-{
-#ifdef _P64
- uint8_t bits = (pku & 0x7);
- jl_value_t *val = (jl_value_t*)(pku & ~0x7);
-
- if (val == NULL) {
- if (bits == BINDING_KIND_IMPLICIT) {
- return BINDING_KIND_GUARD;
- }
- if (bits == BINDING_KIND_CONST) {
- return BINDING_KIND_UNDEF_CONST;
- }
- } else {
- if (bits == BINDING_KIND_DECLARED) {
- return BINDING_KIND_BACKDATED_CONST;
- }
- }
-
- return (enum jl_partition_kind)bits;
-#else
- return (enum jl_partition_kind)pku.kind;
-#endif
+STATIC_INLINE int jl_bkind_is_some_import(enum jl_partition_kind kind) JL_NOTSAFEPOINT {
+ return kind == PARTITION_KIND_IMPLICIT_CONST || kind == PARTITION_KIND_IMPLICIT_GLOBAL || kind == PARTITION_KIND_EXPLICIT || kind == PARTITION_KIND_IMPORTED;
}
-STATIC_INLINE jl_value_t *decode_restriction_value(jl_ptr_kind_union_t JL_PROPAGATES_ROOT pku) JL_NOTSAFEPOINT
-{
-#ifdef _P64
- jl_value_t *val = (jl_value_t*)(pku & ~0x7);
- return val;
-#else
- return pku.val;
-#endif
+STATIC_INLINE int jl_bkind_is_some_explicit_import(enum jl_partition_kind kind) JL_NOTSAFEPOINT {
+ return kind == PARTITION_KIND_EXPLICIT || kind == PARTITION_KIND_IMPORTED;
}
-STATIC_INLINE jl_ptr_kind_union_t encode_restriction(jl_value_t *val, enum jl_partition_kind kind) JL_NOTSAFEPOINT
-{
-#ifdef _P64
- if (kind == BINDING_KIND_GUARD || kind == BINDING_KIND_DECLARED || kind == BINDING_KIND_FAILED || kind == BINDING_KIND_UNDEF_CONST)
- assert(val == NULL);
- else if (kind == BINDING_KIND_IMPLICIT || kind == BINDING_KIND_CONST || kind == BINDING_KIND_BACKDATED_CONST)
- assert(val != NULL);
- if (kind == BINDING_KIND_GUARD)
- kind = BINDING_KIND_IMPLICIT;
- else if (kind == BINDING_KIND_UNDEF_CONST)
- kind = BINDING_KIND_CONST;
- else if (kind == BINDING_KIND_BACKDATED_CONST)
- kind = BINDING_KIND_DECLARED;
- assert((((uintptr_t)val) & 0x7) == 0);
- return ((jl_ptr_kind_union_t)val) | kind;
-#else
- jl_ptr_kind_union_t ret = { val, kind };
- return ret;
-#endif
+STATIC_INLINE int jl_bkind_is_some_guard(enum jl_partition_kind kind) JL_NOTSAFEPOINT {
+ return kind == PARTITION_KIND_FAILED || kind == PARTITION_KIND_GUARD;
}
-STATIC_INLINE int jl_bkind_is_some_import(enum jl_partition_kind kind) JL_NOTSAFEPOINT {
- return kind == BINDING_KIND_IMPLICIT || kind == BINDING_KIND_EXPLICIT || kind == BINDING_KIND_IMPORTED;
+STATIC_INLINE int jl_bkind_is_some_implicit(enum jl_partition_kind kind) JL_NOTSAFEPOINT {
+ return kind == PARTITION_KIND_IMPLICIT_CONST || kind == PARTITION_KIND_IMPLICIT_GLOBAL || jl_bkind_is_some_guard(kind);
}
STATIC_INLINE int jl_bkind_is_some_constant(enum jl_partition_kind kind) JL_NOTSAFEPOINT {
- return kind == BINDING_KIND_CONST || kind == BINDING_KIND_CONST_IMPORT || kind == BINDING_KIND_UNDEF_CONST || kind == BINDING_KIND_BACKDATED_CONST;
+ return kind == PARTITION_KIND_IMPLICIT_CONST || kind == PARTITION_KIND_CONST || kind == PARTITION_KIND_CONST_IMPORT || kind == PARTITION_KIND_UNDEF_CONST || kind == PARTITION_KIND_BACKDATED_CONST;
}
STATIC_INLINE int jl_bkind_is_defined_constant(enum jl_partition_kind kind) JL_NOTSAFEPOINT {
- return kind == BINDING_KIND_CONST || kind == BINDING_KIND_CONST_IMPORT || kind == BINDING_KIND_BACKDATED_CONST;
-}
-
-STATIC_INLINE int jl_bkind_is_some_guard(enum jl_partition_kind kind) JL_NOTSAFEPOINT {
- return kind == BINDING_KIND_FAILED || kind == BINDING_KIND_GUARD || kind == BINDING_KIND_DECLARED;
+ return kind == PARTITION_KIND_IMPLICIT_CONST || kind == PARTITION_KIND_CONST || kind == PARTITION_KIND_CONST_IMPORT || kind == PARTITION_KIND_BACKDATED_CONST;
}
JL_DLLEXPORT jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b JL_PROPAGATES_ROOT, size_t world) JL_GLOBALLY_ROOTED;
+JL_DLLEXPORT jl_binding_partition_t *jl_get_binding_partition_with_hint(jl_binding_t *b JL_PROPAGATES_ROOT, jl_binding_partition_t *previous_part, size_t world) JL_GLOBALLY_ROOTED;
JL_DLLEXPORT jl_binding_partition_t *jl_get_binding_partition_all(jl_binding_t *b JL_PROPAGATES_ROOT, size_t min_world, size_t max_world) JL_GLOBALLY_ROOTED;
+struct restriction_kind_pair {
+ jl_binding_t *binding_if_global;
+ jl_value_t *restriction;
+ enum jl_partition_kind kind;
+ int maybe_depwarn;
+};
+JL_DLLEXPORT int jl_get_binding_leaf_partitions_restriction_kind(jl_binding_t *b JL_PROPAGATES_ROOT, struct restriction_kind_pair *rkp, size_t min_world, size_t max_world) JL_GLOBALLY_ROOTED;
+JL_DLLEXPORT jl_value_t *jl_get_binding_leaf_partitions_value_if_const(jl_binding_t *b JL_PROPAGATES_ROOT, int *maybe_depwarn, size_t min_world, size_t max_world);
+
EXTERN_INLINE_DECLARE uint8_t jl_bpart_get_kind(jl_binding_partition_t *bpart) JL_NOTSAFEPOINT {
- return decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction));
+ return (uint8_t)(bpart->kind & 0xf);
}
-STATIC_INLINE jl_ptr_kind_union_t jl_walk_binding_inplace(jl_binding_t **bnd, jl_binding_partition_t **bpart JL_PROPAGATES_ROOT, size_t world) JL_NOTSAFEPOINT;
-STATIC_INLINE jl_ptr_kind_union_t jl_walk_binding_inplace_all(jl_binding_t **bnd, jl_binding_partition_t **bpart JL_PROPAGATES_ROOT, size_t min_world, size_t max_world) JL_NOTSAFEPOINT;
+STATIC_INLINE void jl_walk_binding_inplace(jl_binding_t **bnd, jl_binding_partition_t **bpart JL_PROPAGATES_ROOT, size_t world) JL_NOTSAFEPOINT;
+STATIC_INLINE void jl_walk_binding_inplace_depwarn(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t world, int *depwarn) JL_NOTSAFEPOINT;
+STATIC_INLINE void jl_walk_binding_inplace_all(jl_binding_t **bnd, jl_binding_partition_t **bpart JL_PROPAGATES_ROOT, int *depwarn, size_t min_world, size_t max_world) JL_NOTSAFEPOINT;
+STATIC_INLINE void jl_walk_binding_inplace_worlds(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t *min_world, size_t *max_world, int *depwarn, size_t world) JL_NOTSAFEPOINT;
#ifndef __clang_analyzer__
-STATIC_INLINE jl_ptr_kind_union_t jl_walk_binding_inplace(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t world) JL_NOTSAFEPOINT
+STATIC_INLINE void jl_walk_binding_inplace(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t world) JL_NOTSAFEPOINT
{
while (1) {
- if (!*bpart)
- return encode_restriction(NULL, BINDING_KIND_GUARD);
- jl_ptr_kind_union_t pku = jl_atomic_load_acquire(&(*bpart)->restriction);
- if (!jl_bkind_is_some_import(decode_restriction_kind(pku)))
- return pku;
- *bnd = (jl_binding_t*)decode_restriction_value(pku);
+ enum jl_partition_kind kind = jl_binding_kind(*bpart);
+ if (!jl_bkind_is_some_explicit_import(kind) && kind != PARTITION_KIND_IMPLICIT_GLOBAL)
+ return;
+ *bnd = (jl_binding_t*)(*bpart)->restriction;
*bpart = jl_get_binding_partition(*bnd, world);
}
}
-STATIC_INLINE jl_ptr_kind_union_t jl_walk_binding_inplace_all(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t min_world, size_t max_world) JL_NOTSAFEPOINT
+STATIC_INLINE void jl_walk_binding_inplace_depwarn(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t world, int *depwarn) JL_NOTSAFEPOINT
{
+ int passed_explicit = 0;
while (1) {
- if (!*bpart)
- return encode_restriction(NULL, BINDING_KIND_GUARD);
- jl_ptr_kind_union_t pku = jl_atomic_load_acquire(&(*bpart)->restriction);
- if (!jl_bkind_is_some_import(decode_restriction_kind(pku)))
- return pku;
- *bnd = (jl_binding_t*)decode_restriction_value(pku);
+ enum jl_partition_kind kind = jl_binding_kind(*bpart);
+ if (!jl_bkind_is_some_explicit_import(kind) && kind != PARTITION_KIND_IMPLICIT_GLOBAL) {
+ if (!passed_explicit && depwarn)
+ *depwarn |= (*bpart)->kind & PARTITION_FLAG_DEPWARN;
+ return;
+ }
+ if (!passed_explicit && depwarn)
+ *depwarn |= (*bpart)->kind & PARTITION_FLAG_DEPWARN;
+ if (kind != PARTITION_KIND_IMPLICIT_GLOBAL)
+ passed_explicit = 1;
+ *bnd = (jl_binding_t*)(*bpart)->restriction;
+ *bpart = jl_get_binding_partition(*bnd, world);
+ }
+}
+
+
+STATIC_INLINE void jl_walk_binding_inplace_all(jl_binding_t **bnd, jl_binding_partition_t **bpart, int *depwarn, size_t min_world, size_t max_world) JL_NOTSAFEPOINT
+{
+ int passed_explicit = 0;
+ while (*bpart) {
+ enum jl_partition_kind kind = jl_binding_kind(*bpart);
+ if (!jl_bkind_is_some_explicit_import(kind) && kind != PARTITION_KIND_IMPLICIT_GLOBAL) {
+ if (!passed_explicit && depwarn)
+ *depwarn |= (*bpart)->kind & PARTITION_FLAG_DEPWARN;
+ return;
+ }
+ if (!passed_explicit && depwarn)
+ *depwarn |= (*bpart)->kind & PARTITION_FLAG_DEPWARN;
+ if (kind != PARTITION_KIND_IMPLICIT_GLOBAL)
+ passed_explicit = 1;
+ *bnd = (jl_binding_t*)(*bpart)->restriction;
*bpart = jl_get_binding_partition_all(*bnd, min_world, max_world);
}
}
+
+STATIC_INLINE void jl_walk_binding_inplace_worlds(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t *min_world, size_t *max_world, int *depwarn, size_t world) JL_NOTSAFEPOINT
+{
+ int passed_explicit = 0;
+ while (*bpart) {
+ if (*min_world < (*bpart)->min_world)
+ *min_world = (*bpart)->min_world;
+ size_t bpart_max_world = jl_atomic_load_relaxed(&(*bpart)->max_world);
+ if (*max_world > bpart_max_world)
+ *max_world = bpart_max_world;
+ enum jl_partition_kind kind = jl_binding_kind(*bpart);
+ if (!jl_bkind_is_some_explicit_import(kind) && kind != PARTITION_KIND_IMPLICIT_GLOBAL) {
+ if (!passed_explicit && depwarn)
+ *depwarn |= (*bpart)->kind & PARTITION_FLAG_DEPWARN;
+ return;
+ }
+ if (!passed_explicit && depwarn)
+ *depwarn |= (*bpart)->kind & PARTITION_FLAG_DEPWARN;
+ if (kind != PARTITION_KIND_IMPLICIT_GLOBAL)
+ passed_explicit = 1;
+ *bnd = (jl_binding_t*)(*bpart)->restriction;
+ *bpart = jl_get_binding_partition(*bnd, world);
+ }
+}
#endif
STATIC_INLINE int is10digit(char c) JL_NOTSAFEPOINT
@@ -1228,16 +1312,18 @@ _Atomic(jl_value_t*) *jl_table_peek_bp(jl_genericmemory_t *a, jl_value_t *key) J
JL_DLLEXPORT jl_method_t *jl_new_method_uninit(jl_module_t*);
+jl_module_t *jl_new_module_(jl_sym_t *name, jl_module_t *parent, uint8_t default_using_core, uint8_t self_name);
+jl_module_t *jl_add_standard_imports(jl_module_t *m);
JL_DLLEXPORT jl_methtable_t *jl_new_method_table(jl_sym_t *name, jl_module_t *module);
-JL_DLLEXPORT jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types, size_t world, int mt_cache);
-jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_value_t *types, jl_svec_t *sp);
+JL_DLLEXPORT jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types JL_PROPAGATES_ROOT, size_t world, int mt_cache);
+jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_value_t *types, jl_svec_t *sp) JL_PROPAGATES_ROOT;
JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_value_t *owner, jl_method_instance_t *li JL_PROPAGATES_ROOT, size_t min_world, size_t max_world);
JL_DLLEXPORT jl_value_t *jl_rettype_inferred_native(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT;
JL_DLLEXPORT jl_code_instance_t *jl_method_compiled(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world) JL_NOTSAFEPOINT;
JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt JL_PROPAGATES_ROOT, jl_value_t *type, size_t world);
JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(
jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams);
-jl_method_instance_t *jl_specializations_get_or_insert(jl_method_instance_t *mi_ins);
+jl_method_instance_t *jl_specializations_get_or_insert(jl_method_instance_t *mi_ins JL_PROPAGATES_ROOT);
JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_value_t *invokesig, jl_code_instance_t *caller);
JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_code_instance_t *caller);
JL_DLLEXPORT void jl_mi_cache_insert(jl_method_instance_t *mi JL_ROOTING_ARGUMENT,
@@ -1256,6 +1342,9 @@ JL_DLLEXPORT void jl_force_trace_compile_timing_disable(void);
JL_DLLEXPORT void jl_force_trace_dispatch_enable(void);
JL_DLLEXPORT void jl_force_trace_dispatch_disable(void);
+JL_DLLEXPORT void jl_tag_newly_inferred_enable(void);
+JL_DLLEXPORT void jl_tag_newly_inferred_disable(void);
+
uint32_t jl_module_next_counter(jl_module_t *m) JL_NOTSAFEPOINT;
jl_tupletype_t *arg_type_tuple(jl_value_t *arg1, jl_value_t **args, size_t nargs);
@@ -1263,6 +1352,9 @@ JL_DLLEXPORT int jl_has_meta(jl_array_t *body, jl_sym_t *sym) JL_NOTSAFEPOINT;
JL_DLLEXPORT jl_value_t *jl_parse(const char *text, size_t text_len, jl_value_t *filename,
size_t lineno, size_t offset, jl_value_t *options);
+jl_code_info_t *jl_inner_ctor_body(jl_array_t *fieldkinds, jl_module_t *inmodule, const char *file, int line);
+jl_code_info_t *jl_outer_ctor_body(jl_value_t *thistype, size_t nfields, size_t nsparams, jl_module_t *inmodule, const char *file, int line);
+void jl_ctor_def(jl_value_t *ty, jl_value_t *functionloc);
//--------------------------------------------------
// Backtraces
@@ -1425,8 +1517,8 @@ JL_DLLEXPORT jl_value_t *jl_get_backtrace(void);
void jl_critical_error(int sig, int si_code, bt_context_t *context, jl_task_t *ct);
JL_DLLEXPORT void jl_raise_debugger(void) JL_NOTSAFEPOINT;
JL_DLLEXPORT void jl_gdblookup(void* ip) JL_NOTSAFEPOINT;
-void jl_print_native_codeloc(uintptr_t ip) JL_NOTSAFEPOINT;
-void jl_print_bt_entry_codeloc(jl_bt_element_t *bt_data) JL_NOTSAFEPOINT;
+void jl_print_native_codeloc(char *pre_str, uintptr_t ip) JL_NOTSAFEPOINT;
+void jl_print_bt_entry_codeloc(int sig, jl_bt_element_t *bt_data) JL_NOTSAFEPOINT;
#ifdef _OS_WINDOWS_
JL_DLLEXPORT void jl_refresh_dbg_module_list(void);
#endif
@@ -1549,7 +1641,7 @@ void win32_formatmessage(DWORD code, char *reason, int len) JL_NOTSAFEPOINT;
#endif
JL_DLLEXPORT void *jl_get_library_(const char *f_lib, int throw_err);
-void *jl_find_dynamic_library_by_addr(void *symbol);
+void *jl_find_dynamic_library_by_addr(void *symbol, int throw_err);
#define jl_get_library(f_lib) jl_get_library_(f_lib, 1)
JL_DLLEXPORT void *jl_load_and_lookup(const char *f_lib, const char *f_name, _Atomic(void*) *hnd);
JL_DLLEXPORT void *jl_lazy_load_and_lookup(jl_value_t *lib_val, const char *f_name);
@@ -1557,6 +1649,9 @@ JL_DLLEXPORT jl_value_t *jl_get_cfunction_trampoline(
jl_value_t *fobj, jl_datatype_t *result, htable_t *cache, jl_svec_t *fill,
void *(*init_trampoline)(void *tramp, void **nval),
jl_unionall_t *env, jl_value_t **vals);
+JL_DLLEXPORT void *jl_get_abi_converter(jl_task_t *ct, _Atomic(void*) *fptr, _Atomic(size_t) *last_world, void *data);
+JL_DLLIMPORT void *jl_jit_abi_converter(jl_task_t *ct, void *unspecialized, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, int specsig,
+ jl_code_instance_t *codeinst, jl_callptr_t invoke, void *target, int target_specsig);
// Special filenames used to refer to internal julia libraries
@@ -1675,6 +1770,10 @@ JL_DLLEXPORT jl_array_t *jl_array_copy(jl_array_t *ary);
JL_DLLEXPORT uintptr_t jl_object_id_(uintptr_t tv, jl_value_t *v) JL_NOTSAFEPOINT;
JL_DLLEXPORT void jl_set_next_task(jl_task_t *task) JL_NOTSAFEPOINT;
+JL_DLLEXPORT uint16_t julia_double_to_half(double param) JL_NOTSAFEPOINT;
+JL_DLLEXPORT uint16_t julia_float_to_half(float param) JL_NOTSAFEPOINT;
+JL_DLLEXPORT float julia_half_to_float(uint16_t param) JL_NOTSAFEPOINT;
+
// -- synchronization utilities -- //
extern jl_mutex_t typecache_lock;
@@ -1945,24 +2044,6 @@ jl_sym_t *_jl_symbol(const char *str, size_t len) JL_NOTSAFEPOINT;
#define JL_GC_ASSERT_LIVE(x) (void)(x)
#endif
-#ifdef _OS_WINDOWS_
-// On Windows, weak symbols do not default to 0 due to a GCC bug
-// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90826), use symbol
-// aliases with a known value instead.
-#define JL_WEAK_SYMBOL_OR_ALIAS_DEFAULT(sym) __attribute__((weak,alias(#sym)))
-#define JL_WEAK_SYMBOL_DEFAULT(sym) &sym
-#else
-#define JL_WEAK_SYMBOL_OR_ALIAS_DEFAULT(sym) __attribute__((weak))
-#define JL_WEAK_SYMBOL_DEFAULT(sym) NULL
-#endif
-
-//JL_DLLEXPORT float julia__gnu_h2f_ieee(half param) JL_NOTSAFEPOINT;
-//JL_DLLEXPORT half julia__gnu_f2h_ieee(float param) JL_NOTSAFEPOINT;
-//JL_DLLEXPORT half julia__truncdfhf2(double param) JL_NOTSAFEPOINT;
-//JL_DLLEXPORT float julia__truncsfbf2(float param) JL_NOTSAFEPOINT;
-//JL_DLLEXPORT float julia__truncdfbf2(double param) JL_NOTSAFEPOINT;
-//JL_DLLEXPORT double julia__extendhfdf2(half n) JL_NOTSAFEPOINT;
-
JL_DLLEXPORT uint32_t jl_crc32c(uint32_t crc, const char *buf, size_t len);
// -- exports from codegen -- //
@@ -1971,7 +2052,6 @@ JL_DLLEXPORT uint32_t jl_crc32c(uint32_t crc, const char *buf, size_t len);
JL_DLLIMPORT void jl_generate_fptr_for_unspecialized(jl_code_instance_t *unspec);
JL_DLLIMPORT int jl_compile_codeinst(jl_code_instance_t *unspec);
-JL_DLLIMPORT int jl_compile_extern_c(LLVMOrcThreadSafeModuleRef llvmmod, void *params, void *sysimg, jl_value_t *declrt, jl_value_t *sigt);
JL_DLLIMPORT void jl_emit_codeinst_to_jit(jl_code_instance_t *codeinst, jl_code_info_t *src);
typedef struct {
diff --git a/src/llvm-alloc-helpers.cpp b/src/llvm-alloc-helpers.cpp
index 194c6837860ca..a1ed66a190190 100644
--- a/src/llvm-alloc-helpers.cpp
+++ b/src/llvm-alloc-helpers.cpp
@@ -214,6 +214,7 @@ void jl_alloc::runEscapeAnalysis(llvm::CallInst *I, EscapeAnalysisRequiredArgs r
}
if (auto call = dyn_cast(inst)) {
// TODO handle `memcmp`
+ // TODO handle `memcpy`, which is used much more often since the switch to opaque pointers
// None of the intrinsics should care if the memory is stack or heap allocated.
auto callee = call->getCalledOperand();
if (auto II = dyn_cast(call)) {
diff --git a/src/llvm-alloc-opt.cpp b/src/llvm-alloc-opt.cpp
index 7dd794a4d8847..ce1d22f42d0ae 100644
--- a/src/llvm-alloc-opt.cpp
+++ b/src/llvm-alloc-opt.cpp
@@ -742,7 +742,9 @@ void Optimizer::moveToStack(CallInst *orig_inst, size_t sz, bool has_ref, AllocF
auto replace_inst = [&] (Instruction *user) {
Instruction *orig_i = cur.orig_i;
Instruction *new_i = cur.new_i;
- if (isa(user) || isa(user)) {
+ if (isa(user) || isa(user) ||
+ isa(user) || isa(user)) {
+ // TODO: these atomics are likely removable if the user is the first argument
user->replaceUsesOfWith(orig_i, new_i);
}
else if (auto call = dyn_cast(user)) {
@@ -1111,6 +1113,7 @@ void Optimizer::splitOnStack(CallInst *orig_inst)
return;
}
else if (isa(user) || isa(user)) {
+ // TODO: Downgrade atomics here potentially
auto slot_idx = find_slot(offset);
auto &slot = slots[slot_idx];
assert(slot.offset <= offset && slot.offset + slot.size >= offset);
diff --git a/src/llvm-codegen-shared.h b/src/llvm-codegen-shared.h
index ff6f5a97299d7..d474fb4f61183 100644
--- a/src/llvm-codegen-shared.h
+++ b/src/llvm-codegen-shared.h
@@ -244,21 +244,17 @@ static inline llvm::Value *emit_gc_state_set(llvm::IRBuilder<> &builder, llvm::T
unsigned offset = offsetof(jl_tls_states_t, gc_state);
Value *gc_state = builder.CreateConstInBoundsGEP1_32(T_int8, ptls, offset, "gc_state");
if (old_state == nullptr) {
- old_state = builder.CreateLoad(T_int8, gc_state);
+ old_state = builder.CreateLoad(T_int8, gc_state, "old_state");
cast(old_state)->setOrdering(AtomicOrdering::Monotonic);
}
builder.CreateAlignedStore(state, gc_state, Align(sizeof(void*)))->setOrdering(AtomicOrdering::Release);
if (auto *C = dyn_cast(old_state))
- if (C->isZero())
- return old_state;
- if (auto *C = dyn_cast(state))
- if (!C->isZero())
- return old_state;
+ if (auto *C2 = dyn_cast(state))
+ if (C->getZExtValue() == C2->getZExtValue())
+ return old_state;
BasicBlock *passBB = BasicBlock::Create(builder.getContext(), "safepoint", builder.GetInsertBlock()->getParent());
BasicBlock *exitBB = BasicBlock::Create(builder.getContext(), "after_safepoint", builder.GetInsertBlock()->getParent());
- Constant *zero8 = ConstantInt::get(T_int8, 0);
- builder.CreateCondBr(builder.CreateOr(builder.CreateICmpEQ(old_state, zero8), // if (!old_state || !state)
- builder.CreateICmpEQ(state, zero8)),
+ builder.CreateCondBr(builder.CreateICmpEQ(old_state, state, "is_new_state"), // Safepoint whenever we change the GC state
passBB, exitBB);
builder.SetInsertPoint(passBB);
MDNode *tbaa = get_tbaa_const(builder.getContext());
diff --git a/src/llvm-late-gc-lowering.cpp b/src/llvm-late-gc-lowering.cpp
index 7d6fba65a79e7..46214666c5d36 100644
--- a/src/llvm-late-gc-lowering.cpp
+++ b/src/llvm-late-gc-lowering.cpp
@@ -1,6 +1,8 @@
// This file is a part of Julia. License is MIT: https://julialang.org/license
#include "llvm-gc-interface-passes.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Casting.h"
#define DEBUG_TYPE "late_lower_gcroot"
@@ -171,12 +173,12 @@ static std::pair FindBaseValue(const State &S, Value *V, bool UseCac
(void)LI;
break;
}
- else if (auto II = dyn_cast(CurrentV)) {
- // Some intrinsics behave like LoadInst followed by a SelectInst
- // This should never happen in a derived addrspace (since those cannot be stored to memory)
- // so we don't need to lift these operations, but we do need to check if it's loaded and continue walking the base pointer
+ else if (auto *II = dyn_cast(CurrentV)) {
if (II->getIntrinsicID() == Intrinsic::masked_load ||
II->getIntrinsicID() == Intrinsic::masked_gather) {
+ // Some intrinsics behave like LoadInst followed by a SelectInst
+ // This should never happen in a derived addrspace (since those cannot be stored to memory)
+ // so we don't need to lift these operations, but we do need to check if it's loaded and continue walking the base pointer
if (auto VTy = dyn_cast(II->getType())) {
if (hasLoadedTy(VTy->getElementType())) {
Value *Mask = II->getOperand(2);
@@ -205,6 +207,24 @@ static std::pair FindBaseValue(const State &S, Value *V, bool UseCac
// In general a load terminates a walk
break;
}
+ else if (II->getIntrinsicID() == Intrinsic::vector_extract) {
+ if (auto VTy = dyn_cast<VectorType>(II->getType())) {
+ if (hasLoadedTy(VTy->getElementType())) {
+ Value *Idx = II->getOperand(1);
+ if (!isa(Idx)) {
+ assert(isa(Idx) && "unimplemented");
+ (void)Idx;
+ }
+ CurrentV = II->getOperand(0);
+ fld_idx = -1;
+ continue;
+ }
+ }
+ break;
+ } else {
+ // Unknown Intrinsic
+ break;
+ }
}
else if (auto CI = dyn_cast(CurrentV)) {
auto callee = CI->getCalledFunction();
@@ -212,9 +232,11 @@ static std::pair FindBaseValue(const State &S, Value *V, bool UseCac
CurrentV = CI->getArgOperand(0);
continue;
}
+ // Unknown Call
break;
}
else {
+ // Unknown Instruction
break;
}
}
@@ -518,6 +540,22 @@ SmallVector LateLowerGCFrame::NumberAllBase(State &S, Value *CurrentV) {
Numbers = NumberAll(S, IEI->getOperand(0));
int ElNumber = Number(S, IEI->getOperand(1));
Numbers[idx] = ElNumber;
+ // C++17
+ // } else if (auto *II = dyn_cast<IntrinsicInst>(CurrentV); II && II->getIntrinsicID() == Intrinsic::vector_insert) {
+ } else if (isa<IntrinsicInst>(CurrentV) && cast<IntrinsicInst>(CurrentV)->getIntrinsicID() == Intrinsic::vector_insert) {
+ auto *II = dyn_cast<IntrinsicInst>(CurrentV);
+ // Vector insert is a bit like a shuffle so use the same approach
+ SmallVector<int, 0> Numbers1 = NumberAll(S, II->getOperand(0));
+ SmallVector<int, 0> Numbers2 = NumberAll(S, II->getOperand(1));
+ unsigned first_idx = cast<ConstantInt>(II->getOperand(2))->getZExtValue();
+ for (unsigned i = 0; i < Numbers1.size(); ++i) {
+ if (i < first_idx)
+ Numbers.push_back(Numbers1[i]);
+ else if (i - first_idx < Numbers2.size())
+ Numbers.push_back(Numbers2[i - first_idx]);
+ else
+ Numbers.push_back(Numbers1[i]);
+ }
} else if (auto *IVI = dyn_cast(CurrentV)) {
Numbers = NumberAll(S, IVI->getAggregateOperand());
auto Tracked = TrackCompositeType(IVI->getType());
@@ -1150,6 +1188,10 @@ State LateLowerGCFrame::LocalScan(Function &F) {
}
}
}
+ if (II->getIntrinsicID() == Intrinsic::vector_extract || II->getIntrinsicID() == Intrinsic::vector_insert) {
+ // These are not real defs
+ continue;
+ }
}
auto callee = CI->getCalledFunction();
if (callee && callee == typeof_func) {
@@ -1979,8 +2021,10 @@ bool LateLowerGCFrame::CleanupIR(Function &F, State *S, bool *CFGModified) {
// strip all constant alias information, as it might depend on the gc having
// preserved a gc root, which stops being true after this pass (#32215)
// similar to RewriteStatepointsForGC::stripNonValidData, but less aggressive
- if (I->getMetadata(LLVMContext::MD_invariant_load))
- I->setMetadata(LLVMContext::MD_invariant_load, NULL);
+ if (auto *LI = dyn_cast<LoadInst>(I)){
+ if (isSpecialPtr(LI->getPointerOperand()->getType()) && LI->getMetadata(LLVMContext::MD_invariant_load))
+ LI->setMetadata(LLVMContext::MD_invariant_load, NULL);
+ }
if (MDNode *TBAA = I->getMetadata(LLVMContext::MD_tbaa)) {
if (TBAA->getNumOperands() == 4 && isTBAA(TBAA, {"jtbaa_const", "jtbaa_memoryptr", "jtbaa_memorylen", "tbaa_memoryown"})) {
MDNode *MutableTBAA = createMutableTBAAAccessTag(TBAA);
@@ -2181,16 +2225,47 @@ bool LateLowerGCFrame::CleanupIR(Function &F, State *S, bool *CFGModified) {
NewCall->copyMetadata(*CI);
CI->replaceAllUsesWith(NewCall);
UpdatePtrNumbering(CI, NewCall, S);
- } else if (CI->arg_size() == CI->getNumOperands()) {
- /* No operand bundle to lower */
- ++it;
- continue;
} else {
- CallInst *NewCall = CallInst::Create(CI, None, CI);
- NewCall->takeName(CI);
- NewCall->copyMetadata(*CI);
- CI->replaceAllUsesWith(NewCall);
- UpdatePtrNumbering(CI, NewCall, S);
+ SmallVector<OperandBundleDef, 2> bundles;
+ CI->getOperandBundlesAsDefs(bundles);
+ bool gc_transition = false;
+ for (auto &bundle: bundles)
+ if (bundle.getTag() == "gc-transition")
+ gc_transition = true;
+
+ // In theory LLVM wants us to lower this using RewriteStatepointsForGC
+ if (gc_transition) {
+ // Insert the operations to switch to gc_safe if necessary.
+ IRBuilder<> builder(CI);
+ Value *pgcstack = getOrAddPGCstack(F);
+ assert(pgcstack);
+ // We don't use emit_state_set here because safepoints are unconditional for any code that reaches this
+ // We are basically guaranteed to go from gc_unsafe to gc_safe and back, and both transitions need a safepoint
+ // We also can't add any BBs here, so just avoiding the branches is good
+ Value *ptls = get_current_ptls_from_task(builder, get_current_task_from_pgcstack(builder, pgcstack), tbaa_gcframe);
+ unsigned offset = offsetof(jl_tls_states_t, gc_state);
+ Value *gc_state = builder.CreateConstInBoundsGEP1_32(Type::getInt8Ty(builder.getContext()), ptls, offset, "gc_state");
+ LoadInst *last_gc_state = builder.CreateAlignedLoad(Type::getInt8Ty(builder.getContext()), gc_state, Align(sizeof(void*)));
+ last_gc_state->setOrdering(AtomicOrdering::Monotonic);
+ builder.CreateAlignedStore(builder.getInt8(JL_GC_STATE_SAFE), gc_state, Align(sizeof(void*)))->setOrdering(AtomicOrdering::Release);
+ MDNode *tbaa = get_tbaa_const(builder.getContext());
+ emit_gc_safepoint(builder, T_size, ptls, tbaa, false);
+ builder.SetInsertPoint(CI->getNextNode());
+ builder.CreateAlignedStore(last_gc_state, gc_state, Align(sizeof(void*)))->setOrdering(AtomicOrdering::Release);
+ emit_gc_safepoint(builder, T_size, ptls, tbaa, false);
+ }
+ if (CI->arg_size() == CI->getNumOperands()) {
+ /* No operand bundle to lower */
+ ++it;
+ continue;
+ } else {
+ // remove operand bundle
+ CallInst *NewCall = CallInst::Create(CI, None, CI);
+ NewCall->takeName(CI);
+ NewCall->copyMetadata(*CI);
+ CI->replaceAllUsesWith(NewCall);
+ UpdatePtrNumbering(CI, NewCall, S);
+ }
}
if (!CI->use_empty()) {
CI->replaceAllUsesWith(UndefValue::get(CI->getType()));
@@ -2291,7 +2366,7 @@ void LateLowerGCFrame::PlaceGCFrameStores(State &S, unsigned MinColorRoot,
const LargeSparseBitVector &NowLive = S.LiveSets[*rit];
// reset slots which are no longer alive
for (int Idx : *LastLive) {
- if (Idx >= PreAssignedColors && !HasBitSet(NowLive, Idx)) {
+ if (Colors[Idx] >= PreAssignedColors && !HasBitSet(NowLive, Idx)) {
PlaceGCFrameReset(S, Idx, MinColorRoot, Colors, GCFrame,
S.ReverseSafepointNumbering[*rit]);
}
diff --git a/src/llvm-multiversioning.cpp b/src/llvm-multiversioning.cpp
index a76d076ebd6f3..02f77298513ee 100644
--- a/src/llvm-multiversioning.cpp
+++ b/src/llvm-multiversioning.cpp
@@ -216,7 +216,7 @@ static void annotate_module_clones(Module &M) {
if (auto maybe_specs = get_target_specs(M)) {
specs = std::move(*maybe_specs);
} else {
- auto full_specs = jl_get_llvm_clone_targets();
+ auto full_specs = jl_get_llvm_clone_targets(jl_options.cpu_target);
specs.reserve(full_specs.size());
for (auto &spec: full_specs) {
specs.push_back(TargetSpec::fromSpec(spec));
diff --git a/src/llvm-pass-helpers.cpp b/src/llvm-pass-helpers.cpp
index ca25251040fb2..9d415d923ecb6 100644
--- a/src/llvm-pass-helpers.cpp
+++ b/src/llvm-pass-helpers.cpp
@@ -88,6 +88,27 @@ llvm::CallInst *JuliaPassContext::getPGCstack(llvm::Function &F) const
return nullptr;
}
+llvm::CallInst *JuliaPassContext::getOrAddPGCstack(llvm::Function &F)
+{
+ if (pgcstack_getter || adoptthread_func)
+ for (auto &I : F.getEntryBlock()) {
+ if (CallInst *callInst = dyn_cast<CallInst>(&I)) {
+ Value *callee = callInst->getCalledOperand();
+ if ((pgcstack_getter && callee == pgcstack_getter) ||
+ (adoptthread_func && callee == adoptthread_func)) {
+ return callInst;
+ }
+ }
+ }
+ IRBuilder<> builder(&F.getEntryBlock().front());
+ if (pgcstack_getter)
+ return builder.CreateCall(pgcstack_getter);
+ auto FT = FunctionType::get(PointerType::get(F.getContext(), 0), false);
+ auto F2 = Function::Create(FT, Function::ExternalLinkage, "julia.get_pgcstack", F.getParent());
+ pgcstack_getter = F2;
+ return builder.CreateCall(F2);
+}
+
llvm::Function *JuliaPassContext::getOrNull(
const jl_intrinsics::IntrinsicDescription &desc) const
{
diff --git a/src/llvm-pass-helpers.h b/src/llvm-pass-helpers.h
index d46f1f46634e6..ac08cda2d61e0 100644
--- a/src/llvm-pass-helpers.h
+++ b/src/llvm-pass-helpers.h
@@ -87,7 +87,10 @@ struct JuliaPassContext {
// point of the given function, if there exists such a call.
// Otherwise, `nullptr` is returned.
llvm::CallInst *getPGCstack(llvm::Function &F) const;
-
+ // Gets a call to the `julia.get_pgcstack' intrinsic in the entry
+ // point of the given function, if there exists such a call.
+ // Otherwise, creates a new call to the intrinsic
+ llvm::CallInst *getOrAddPGCstack(llvm::Function &F);
// Gets the intrinsic or well-known function that conforms to
// the given description if it exists in the module. If not,
// `nullptr` is returned.
diff --git a/src/llvm-ptls.cpp b/src/llvm-ptls.cpp
index e36136859517a..97c9d5a9551f5 100644
--- a/src/llvm-ptls.cpp
+++ b/src/llvm-ptls.cpp
@@ -170,11 +170,11 @@ void LowerPTLS::fix_pgcstack_use(CallInst *pgcstack, Function *pgcstack_getter,
*CFGModified = true;
// emit slow branch code
CallInst *adopt = cast(pgcstack->clone());
- Function *adoptFunc = M->getFunction(XSTR(jl_adopt_thread));
+ Function *adoptFunc = M->getFunction(XSTR(jl_autoinit_and_adopt_thread));
if (adoptFunc == NULL) {
adoptFunc = Function::Create(pgcstack_getter->getFunctionType(),
pgcstack_getter->getLinkage(), pgcstack_getter->getAddressSpace(),
- XSTR(jl_adopt_thread), M);
+ XSTR(jl_autoinit_and_adopt_thread), M);
adoptFunc->copyAttributesFrom(pgcstack_getter);
adoptFunc->copyMetadata(pgcstack_getter, 0);
}
@@ -196,8 +196,13 @@ void LowerPTLS::fix_pgcstack_use(CallInst *pgcstack, Function *pgcstack_getter,
last_gc_state->addIncoming(prior, fastTerm->getParent());
for (auto &BB : *pgcstack->getParent()->getParent()) {
if (isa(BB.getTerminator())) {
+ // Don't use emit_gc_safe_leave here, as that introduces a new BB while iterating BBs
builder.SetInsertPoint(BB.getTerminator());
- emit_gc_unsafe_leave(builder, T_size, get_current_ptls_from_task(builder, get_current_task_from_pgcstack(builder, phi), tbaa), last_gc_state, true);
+ Value *ptls = get_current_ptls_from_task(builder, get_current_task_from_pgcstack(builder, phi), tbaa_gcframe);
+ unsigned offset = offsetof(jl_tls_states_t, gc_state);
+ Value *gc_state = builder.CreateConstInBoundsGEP1_32(Type::getInt8Ty(builder.getContext()), ptls, offset, "gc_state");
+ builder.CreateAlignedStore(last_gc_state, gc_state, Align(sizeof(void*)))->setOrdering(AtomicOrdering::Release);
+ emit_gc_safepoint(builder, T_size, ptls, tbaa, true);
}
}
}
diff --git a/src/llvm-remove-addrspaces.cpp b/src/llvm-remove-addrspaces.cpp
index bb492f467e74c..78ff70b12409b 100644
--- a/src/llvm-remove-addrspaces.cpp
+++ b/src/llvm-remove-addrspaces.cpp
@@ -256,7 +256,7 @@ bool removeAddrspaces(Module &M, AddrspaceRemapFunction ASRemapper)
Name,
(GlobalVariable *)nullptr,
GV->getThreadLocalMode(),
- GV->getType()->getAddressSpace());
+ cast<PointerType>(TypeRemapper.remapType(GV->getType()))->getAddressSpace());
NGV->copyAttributesFrom(GV);
VMap[GV] = NGV;
}
@@ -276,7 +276,7 @@ bool removeAddrspaces(Module &M, AddrspaceRemapFunction ASRemapper)
auto *NGA = GlobalAlias::create(
TypeRemapper.remapType(GA->getValueType()),
- GA->getType()->getPointerAddressSpace(),
+ cast<PointerType>(TypeRemapper.remapType(GA->getType()))->getAddressSpace(),
GA->getLinkage(),
Name,
&M);
diff --git a/src/method.c b/src/method.c
index 8a14eb00182b1..1f195a5eef979 100644
--- a/src/method.c
+++ b/src/method.c
@@ -39,18 +39,55 @@ static void check_c_types(const char *where, jl_value_t *rt, jl_value_t *at)
}
}
+void jl_add_scanned_method(jl_module_t *m, jl_method_t *meth)
+{
+ JL_LOCK(&m->lock);
+ if (m->scanned_methods == jl_nothing) {
+ m->scanned_methods = (jl_value_t*)jl_alloc_vec_any(0);
+ jl_gc_wb(m, m->scanned_methods);
+ }
+ jl_array_ptr_1d_push((jl_array_t*)m->scanned_methods, (jl_value_t*)meth);
+ JL_UNLOCK(&m->lock);
+}
+
+JL_DLLEXPORT void jl_scan_method_source_now(jl_method_t *m, jl_value_t *src)
+{
+ if (!jl_atomic_fetch_or(&m->did_scan_source, 1)) {
+ jl_code_info_t *code = NULL;
+ JL_GC_PUSH1(&code);
+ if (!jl_is_code_info(src))
+ code = jl_uncompress_ir(m, NULL, src);
+ else
+ code = (jl_code_info_t*)src;
+ jl_array_t *stmts = code->code;
+ size_t i, l = jl_array_nrows(stmts);
+ int any_implicit = 0;
+ for (i = 0; i < l; i++) {
+ jl_value_t *stmt = jl_array_ptr_ref(stmts, i);
+ if (jl_is_globalref(stmt)) {
+ jl_globalref_t *gr = (jl_globalref_t*)stmt;
+ jl_binding_t *b = gr->binding;
+ if (!b)
+ b = jl_get_module_binding(gr->mod, gr->name, 1);
+ any_implicit |= jl_maybe_add_binding_backedge(b, (jl_value_t*)m, m);
+ }
+ }
+ if (any_implicit && !(jl_atomic_fetch_or(&m->did_scan_source, 0x2) & 0x2))
+ jl_add_scanned_method(m->module, m);
+ JL_GC_POP();
+ }
+}
+
// Resolve references to non-locally-defined variables to become references to global
// variables in `module` (unless the rvalue is one of the type parameters in `sparam_vals`).
static jl_value_t *resolve_definition_effects(jl_value_t *expr, jl_module_t *module, jl_svec_t *sparam_vals, jl_value_t *binding_edge,
int binding_effects, int eager_resolve)
{
if (jl_is_symbol(expr)) {
- jl_error("Found raw symbol in code returned from lowering. Expected all symbols to have been resolved to GlobalRef or slots.");
- }
- if (jl_is_globalref(expr)) {
- jl_maybe_add_binding_backedge((jl_globalref_t*)expr, module, binding_edge);
- return expr;
+ jl_errorf("Found raw symbol %s in code returned from lowering. Expected all symbols to have been resolved to GlobalRef or slots.",
+ jl_symbol_name((jl_sym_t*)expr));
}
+
if (!jl_is_expr(expr)) {
return expr;
}
@@ -62,6 +99,11 @@ static jl_value_t *resolve_definition_effects(jl_value_t *expr, jl_module_t *mod
jl_eval_global_expr(module, e, 1);
return jl_nothing;
}
+ if (e->head == jl_globaldecl_sym && binding_effects) {
+ assert(jl_expr_nargs(e) == 1);
+ jl_declare_global(module, jl_exprarg(e, 0), NULL, 1);
+ return jl_nothing;
+ }
// These exprs are not fully linearized
if (e->head == jl_assign_sym) {
jl_exprargset(e, 1, resolve_definition_effects(jl_exprarg(e, 1), module, sparam_vals, binding_edge, binding_effects, eager_resolve));
@@ -133,7 +175,7 @@ static jl_value_t *resolve_definition_effects(jl_value_t *expr, jl_module_t *mod
return expr;
}
if (e->head == jl_foreigncall_sym) {
- JL_NARGSV(ccall method definition, 5); // (fptr, rt, at, nreq, (cc, effects))
+ JL_NARGSV(ccall method definition, 5); // (fptr, rt, at, nreq, (cc, effects, gc_safe))
jl_task_t *ct = jl_current_task;
jl_value_t *rt = jl_exprarg(e, 1);
jl_value_t *at = jl_exprarg(e, 2);
@@ -167,11 +209,12 @@ static jl_value_t *resolve_definition_effects(jl_value_t *expr, jl_module_t *mod
jl_value_t *cc = jl_quotenode_value(jl_exprarg(e, 4));
if (!jl_is_symbol(cc)) {
JL_TYPECHK(ccall method definition, tuple, cc);
- if (jl_nfields(cc) != 2) {
+ if (jl_nfields(cc) != 3) {
jl_error("In ccall calling convention, expected two argument tuple or symbol.");
}
JL_TYPECHK(ccall method definition, symbol, jl_get_nth_field(cc, 0));
JL_TYPECHK(ccall method definition, uint16, jl_get_nth_field(cc, 1));
+ JL_TYPECHK(ccall method definition, bool, jl_get_nth_field(cc, 2));
}
}
if (e->head == jl_call_sym && nargs > 0 &&
@@ -180,26 +223,24 @@ static jl_value_t *resolve_definition_effects(jl_value_t *expr, jl_module_t *mod
jl_value_t *fe = jl_exprarg(e, 0);
jl_module_t *fe_mod = jl_globalref_mod(fe);
jl_sym_t *fe_sym = jl_globalref_name(fe);
- if (jl_binding_resolved_p(fe_mod, fe_sym)) {
- // look at some known called functions
- jl_binding_t *b = jl_get_binding(fe_mod, fe_sym);
- if (jl_get_binding_value_if_const(b) == jl_builtin_tuple) {
- size_t j;
- for (j = 1; j < nargs; j++) {
- if (!jl_is_quotenode(jl_exprarg(e, j)))
- break;
+ // look at some known called functions
+ jl_binding_t *b = jl_get_binding(fe_mod, fe_sym);
+ if (jl_get_binding_value_if_const(b) == jl_builtin_tuple) {
+ size_t j;
+ for (j = 1; j < nargs; j++) {
+ if (!jl_is_quotenode(jl_exprarg(e, j)))
+ break;
+ }
+ if (j == nargs) {
+ jl_value_t *val = NULL;
+ JL_TRY {
+ val = jl_interpret_toplevel_expr_in(module, (jl_value_t*)e, NULL, sparam_vals);
}
- if (j == nargs) {
- jl_value_t *val = NULL;
- JL_TRY {
- val = jl_interpret_toplevel_expr_in(module, (jl_value_t*)e, NULL, sparam_vals);
- }
- JL_CATCH {
- val = NULL; // To make the analyzer happy see #define JL_TRY
- }
- if (val)
- return val;
+ JL_CATCH {
+ val = NULL; // To make the analyzer happy see #define JL_TRY
}
+ if (val)
+ return val;
}
}
}
@@ -482,8 +523,17 @@ jl_code_info_t *jl_new_code_info_from_ir(jl_expr_t *ir)
is_flag_stmt = 1;
else if (jl_is_expr(st) && ((jl_expr_t*)st)->head == jl_return_sym)
jl_array_ptr_set(body, j, jl_new_struct(jl_returnnode_type, jl_exprarg(st, 0)));
- else if (jl_is_expr(st) && (((jl_expr_t*)st)->head == jl_foreigncall_sym || ((jl_expr_t*)st)->head == jl_cfunction_sym))
- li->has_fcall = 1;
+ else if (jl_is_globalref(st)) {
+ jl_globalref_t *gr = (jl_globalref_t*)st;
+ if (jl_object_in_image((jl_value_t*)gr->mod))
+ li->has_image_globalref = 1;
+ }
+ else {
+ if (jl_is_expr(st) && ((jl_expr_t*)st)->head == jl_assign_sym)
+ st = jl_exprarg(st, 1);
+ if (jl_is_expr(st) && (((jl_expr_t*)st)->head == jl_foreigncall_sym || ((jl_expr_t*)st)->head == jl_cfunction_sym))
+ li->has_fcall = 1;
+ }
if (is_flag_stmt)
jl_array_uint32_set(li->ssaflags, j, 0);
else {
@@ -585,6 +635,7 @@ JL_DLLEXPORT jl_code_info_t *jl_new_code_info_uninit(void)
src->max_world = ~(size_t)0;
src->propagate_inbounds = 0;
src->has_fcall = 0;
+ src->has_image_globalref = 0;
src->nospecializeinfer = 0;
src->constprop = 0;
src->inlining = 0;
@@ -677,7 +728,7 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *mi, size_t
JL_TRY {
ct->ptls->in_pure_callback = 1;
ct->world_age = jl_atomic_load_relaxed(&def->primary_world);
- if (ct->world_age > jl_atomic_load_acquire(&jl_world_counter) || jl_atomic_load_relaxed(&def->deleted_world) < ct->world_age)
+ if (ct->world_age > jl_atomic_load_acquire(&jl_world_counter))
jl_error("The generator method cannot run until it is added to a method table.");
// invoke code generator
@@ -956,9 +1007,10 @@ JL_DLLEXPORT jl_method_t *jl_new_method_uninit(jl_module_t *module)
m->isva = 0;
m->nargs = 0;
jl_atomic_store_relaxed(&m->primary_world, ~(size_t)0);
- jl_atomic_store_relaxed(&m->deleted_world, 1);
+ jl_atomic_store_relaxed(&m->dispatch_status, 0);
m->is_for_opaque_closure = 0;
m->nospecializeinfer = 0;
+ jl_atomic_store_relaxed(&m->did_scan_source, 0);
m->constprop = 0;
m->purity.bits = 0;
m->max_varargs = UINT8_MAX;
@@ -976,7 +1028,7 @@ JL_DLLEXPORT jl_method_t *jl_new_method_uninit(jl_module_t *module)
int get_next_edge(jl_array_t *list, int i, jl_value_t** invokesig, jl_code_instance_t **caller) JL_NOTSAFEPOINT
{
jl_value_t *item = jl_array_ptr_ref(list, i);
- if (jl_is_code_instance(item)) {
+ if (!item || jl_is_code_instance(item)) {
// Not an `invoke` call, it's just the CodeInstance
if (invokesig != NULL)
*invokesig = NULL;
@@ -1001,6 +1053,14 @@ int set_next_edge(jl_array_t *list, int i, jl_value_t *invokesig, jl_code_instan
return i;
}
+int clear_next_edge(jl_array_t *list, int i, jl_value_t *invokesig, jl_code_instance_t *caller)
+{
+ if (invokesig)
+ jl_array_ptr_set(list, i++, NULL);
+ jl_array_ptr_set(list, i++, NULL);
+ return i;
+}
+
void push_edge(jl_array_t *list, jl_value_t *invokesig, jl_code_instance_t *caller)
{
if (invokesig)
@@ -1009,6 +1069,33 @@ void push_edge(jl_array_t *list, jl_value_t *invokesig, jl_code_instance_t *call
return;
}
+void jl_mi_done_backedges(jl_method_instance_t *mi JL_PROPAGATES_ROOT, uint8_t old_flags) {
+ uint8_t flags_now = 0;
+ jl_array_t *backedges = jl_mi_get_backedges_mutate(mi, &flags_now);
+ if (backedges && !old_flags) {
+ if (flags_now & MI_FLAG_BACKEDGES_DIRTY) {
+ size_t n = jl_array_nrows(backedges);
+ size_t i = 0;
+ size_t insb = 0;
+ while (i < n) {
+ jl_value_t *invokesig;
+ jl_code_instance_t *caller;
+ i = get_next_edge(backedges, i, &invokesig, &caller);
+ if (!caller)
+ continue;
+ insb = set_next_edge(backedges, insb, invokesig, caller);
+ }
+ if (insb == n) {
+ // All were deleted
+ mi->backedges = NULL;
+ } else {
+ jl_array_del_end(backedges, n - insb);
+ }
+ }
+ jl_atomic_fetch_and_relaxed(&mi->flags, ~MI_FLAG_BACKEDGES_ALL);
+ }
+}
+
// method definition ----------------------------------------------------------
jl_method_t *jl_make_opaque_closure_method(jl_module_t *module, jl_value_t *name,
@@ -1047,28 +1134,22 @@ JL_DLLEXPORT void jl_check_gf(jl_value_t *gf, jl_sym_t *name)
jl_errorf("cannot define function %s; it already has a value", jl_symbol_name(name));
}
-JL_DLLEXPORT jl_value_t *jl_declare_const_gf(jl_binding_t *b, jl_module_t *mod, jl_sym_t *name)
+JL_DLLEXPORT jl_value_t *jl_declare_const_gf(jl_module_t *mod, jl_sym_t *name)
{
JL_LOCK(&world_counter_lock);
size_t new_world = jl_atomic_load_relaxed(&jl_world_counter) + 1;
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, new_world);
- jl_ptr_kind_union_t pku = jl_walk_binding_inplace(&b, &bpart, new_world);
- jl_value_t *gf = NULL;
- if (!jl_bkind_is_some_guard(decode_restriction_kind(pku))) {
- if (jl_bkind_is_some_constant(decode_restriction_kind(pku))) {
- gf = decode_restriction_value(pku);
- JL_GC_PROMISE_ROOTED(gf);
- jl_check_gf(gf, b->globalref->name);
- JL_UNLOCK(&world_counter_lock);
- return gf;
- }
- jl_errorf("cannot define function %s; it already has a value", jl_symbol_name(name));
+ jl_binding_t *b = jl_get_module_binding(mod, name, 1);
+ jl_value_t *gf = jl_get_existing_strong_gf(b, new_world);
+ if (gf) {
+ jl_check_gf(gf, name);
+ JL_UNLOCK(&world_counter_lock);
+ return gf;
}
gf = (jl_value_t*)jl_new_generic_function(name, mod, new_world);
// From this point on (if we didn't error), we're committed to raising the world age,
// because we've used it to declare the type name.
+ jl_declare_constant_val3(b, mod, name, gf, PARTITION_KIND_CONST, new_world);
jl_atomic_store_release(&jl_world_counter, new_world);
- jl_declare_constant_val3(b, mod, name, gf, BINDING_KIND_CONST, new_world);
JL_GC_PROMISE_ROOTED(gf);
JL_UNLOCK(&world_counter_lock);
return gf;
@@ -1261,6 +1342,135 @@ JL_DLLEXPORT jl_method_t* jl_method_def(jl_svec_t *argdata,
return m;
}
+void jl_ctor_def(jl_value_t *ty, jl_value_t *functionloc)
+{
+ jl_datatype_t *dt = (jl_datatype_t*)jl_unwrap_unionall(ty);
+ JL_TYPECHK(ctor_def, datatype, (jl_value_t*)dt);
+ JL_TYPECHK(ctor_def, linenumbernode, functionloc);
+ jl_svec_t *fieldtypes = jl_get_fieldtypes(dt);
+ size_t nfields = jl_svec_len(fieldtypes);
+ size_t nparams = jl_subtype_env_size(ty);
+ jl_module_t *inmodule = dt->name->module;
+ jl_sym_t *file = (jl_sym_t*)jl_linenode_file(functionloc);
+ if (!jl_is_symbol(file))
+ file = jl_empty_sym;
+ int32_t line = jl_linenode_line(functionloc);
+
+ // argdata is svec(svec(types...), svec(typevars...), functionloc)
+ jl_svec_t *argdata = jl_alloc_svec(3);
+ jl_array_t *fieldkinds = NULL;
+ jl_code_info_t *body = NULL;
+ JL_GC_PUSH3(&argdata, &fieldkinds, &body);
+ jl_svecset(argdata, 2, functionloc);
+ jl_svec_t *tvars = jl_alloc_svec(nparams);
+ jl_svecset(argdata, 1, tvars);
+ jl_unionall_t *ua = (jl_unionall_t*)ty;
+ for (size_t i = 0; i < nparams; i++) {
+ assert(jl_is_unionall(ua));
+ jl_svecset(tvars, i, ua->var);
+ ua = (jl_unionall_t*)ua->body;
+ }
+ jl_svec_t *names = dt->name->names;
+
+ // define outer constructor (if all typevars are present (thus not definitely unconstrained) by the fields or other typevars which themselves are constrained)
+ int constrains_all_tvars = 1;
+ for (size_t i = nparams; i > 0; i--) {
+ jl_tvar_t *tv = (jl_tvar_t*)jl_svecref(tvars, i - 1);
+ int constrains_tvar = 0;
+ for (size_t i = 0; i < nfields; i++) {
+ jl_value_t *ft = jl_svecref(fieldtypes, i);
+ if (jl_has_typevar(ft, tv)) {
+ constrains_tvar = 1;
+ break;
+ }
+ }
+ for (size_t j = i; j < nparams; j++) {
+ jl_tvar_t *tv2 = (jl_tvar_t*)jl_svecref(tvars, j);
+ if (jl_has_typevar(tv2->ub, tv)) { // lb doesn't constrain, but jl_has_typevar doesn't have a way to specify that we care about may-constrain and not merely containment
+ constrains_tvar = 1;
+ break;
+ }
+ if (tv2 == tv) {
+ constrains_tvar = 0;
+ break;
+ }
+ }
+ if (!constrains_tvar) {
+ constrains_all_tvars = 0;
+ break;
+ }
+ }
+ if (constrains_all_tvars) {
+ jl_svec_t *atypes = jl_alloc_svec(nfields + 1);
+ jl_svecset(argdata, 0, atypes);
+ jl_svecset(atypes, 0, jl_wrap_Type(ty));
+ for (size_t i = 0; i < nfields; i++) {
+ jl_value_t *ft = jl_svecref(fieldtypes, i);
+ jl_svecset(atypes, i + 1, ft);
+ }
+ body = jl_outer_ctor_body(ty, nfields, nparams, inmodule, jl_symbol_name(file), line);
+ if (names) {
+ jl_array_t *slotnames = body->slotnames;
+ for (size_t i = 0; i < nfields; i++) {
+ jl_array_ptr_set(slotnames, i + 1, jl_svecref(names, i));
+ }
+ }
+ jl_method_def(argdata, NULL, body, inmodule);
+ if (nparams == 0) {
+ int all_Any = 1; // check if all fields are Any and the type is not parameterized, since inner constructor would be the same signature and code
+ for (size_t i = 0; i < nfields; i++) {
+ jl_value_t *ft = jl_svecref(fieldtypes, i);
+ if (ft != (jl_value_t*)jl_any_type) {
+ all_Any = 0;
+ break;
+ }
+ }
+ if (all_Any) {
+ JL_GC_POP();
+ return;
+ }
+ }
+ }
+
+ // define inner constructor
+ jl_svec_t *atypes = jl_svec_fill(nfields + 1, (jl_value_t*)jl_any_type);
+ jl_svecset(argdata, 0, atypes);
+ jl_value_t *typedt = (jl_value_t*)jl_wrap_Type((jl_value_t*)dt);
+ jl_svecset(atypes, 0, typedt);
+ fieldkinds = jl_alloc_vec_any(nfields);
+ for (size_t i = 0; i < nfields; i++) {
+ jl_value_t *ft = jl_svecref(fieldtypes, i);
+ int kind = ft == (jl_value_t*)jl_any_type ? -1 : 0;
+ // TODO: if more efficient to do so, we could reference the sparam instead of fieldtype
+ //if (jl_is_typevar(ft)) {
+ // for (size_t i = 0; i < nparams; i++) {
+ // if (jl_svecref(tvars, i) == ft) {
+ // kind = i + 1;
+ // break; // if repeated, must consider only the innermost
+ // }
+ // }
+ //}
+ jl_array_ptr_set(fieldkinds, i, jl_box_long(kind));
+ }
+ // rewrap_unionall(Type{dt}, ty)
+ for (size_t i = nparams; i > 0; i--) {
+ jl_value_t *tv = jl_svecref(tvars, i - 1);
+ typedt = jl_new_struct(jl_unionall_type, tv, typedt);
+ jl_svecset(atypes, 0, typedt);
+ }
+ tvars = jl_emptysvec;
+ jl_svecset(argdata, 1, tvars);
+ body = jl_inner_ctor_body(fieldkinds, inmodule, jl_symbol_name(file), line);
+ if (names) {
+ jl_array_t *slotnames = body->slotnames;
+ for (size_t i = 0; i < nfields; i++) {
+ jl_array_ptr_set(slotnames, i + 1, jl_svecref(names, i));
+ }
+ }
+ jl_method_def(argdata, NULL, body, inmodule);
+ JL_GC_POP();
+}
+
// root blocks
// This section handles method roots. Roots are GC-preserved items needed to
diff --git a/src/module.c b/src/module.c
index b2a4018519fca..9a456de0b11d8 100644
--- a/src/module.c
+++ b/src/module.c
@@ -14,52 +14,400 @@ extern "C" {
// In this translation unit and this translation unit only emit this symbol `extern` for use by julia
EXTERN_INLINE_DEFINE uint8_t jl_bpart_get_kind(jl_binding_partition_t *bpart) JL_NOTSAFEPOINT;
-extern inline enum jl_partition_kind decode_restriction_kind(jl_ptr_kind_union_t pku) JL_NOTSAFEPOINT;
static jl_binding_partition_t *new_binding_partition(void)
{
jl_binding_partition_t *bpart = (jl_binding_partition_t*)jl_gc_alloc(jl_current_task->ptls, sizeof(jl_binding_partition_t), jl_binding_partition_type);
- jl_atomic_store_relaxed(&bpart->restriction, encode_restriction(NULL, BINDING_KIND_GUARD));
- bpart->min_world = 0;
+ bpart->restriction = NULL;
+ bpart->kind = (size_t)PARTITION_KIND_GUARD;
+ jl_atomic_store_relaxed(&bpart->min_world, 0);
jl_atomic_store_relaxed(&bpart->max_world, (size_t)-1);
jl_atomic_store_relaxed(&bpart->next, NULL);
-#ifdef _P64
- bpart->reserved = 0;
-#endif
return bpart;
}
-jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) {
- if (!b)
+struct implicit_search_gap {
+ _Atomic(jl_binding_partition_t *) *insert;
+ jl_binding_partition_t *replace;
+ jl_value_t *parent;
+
+ size_t min_world;
+ size_t max_world;
+ size_t inherited_flags;
+};
+
+STATIC_INLINE jl_binding_partition_t *jl_get_binding_partition__(jl_binding_t *b JL_PROPAGATES_ROOT, size_t world, struct implicit_search_gap *gap) JL_GLOBALLY_ROOTED
+{
+ // Iterate through the list of binding partitions, keeping track of where to insert a new one for an implicit
+ // resolution if necessary.
+ while (gap->replace) {
+ size_t replace_min_world = jl_atomic_load_relaxed(&gap->replace->min_world);
+ if (world >= replace_min_world)
+ break;
+ gap->insert = &gap->replace->next;
+ gap->max_world = replace_min_world - 1;
+ gap->parent = (jl_value_t*)gap->replace;
+ gap->replace = jl_atomic_load_relaxed(gap->insert);
+ }
+ if (gap->replace && world <= jl_atomic_load_relaxed(&gap->replace->max_world)) {
+ return gap->replace;
+ }
+ gap->min_world = gap->replace ? jl_atomic_load_relaxed(&gap->replace->max_world) + 1 : 0;
+ if (gap->replace)
+ gap->inherited_flags = gap->replace->kind & PARTITION_MASK_FLAG;
+ else
+ gap->inherited_flags = 0;
+ return NULL;
+}
+
+STATIC_INLINE jl_binding_partition_t *jl_get_binding_partition_if_present(jl_binding_t *b JL_PROPAGATES_ROOT, size_t world, struct implicit_search_gap *gap)
+{
+ gap->parent = (jl_value_t*)b;
+ gap->insert = &b->partitions;
+ gap->replace = jl_atomic_load_relaxed(gap->insert);
+ gap->min_world = 0;
+ gap->max_world = ~(size_t)0;
+ gap->inherited_flags = 0;
+ return jl_get_binding_partition__(b, world, gap);
+}
+
+struct implicit_search_resolution {
+ enum jl_partition_kind ultimate_kind;
+ jl_value_t *binding_or_const;
+ size_t min_world;
+ size_t max_world;
+ int saw_cycle;
+ //// Not semantic, but used for reflection.
+ // If non-null, the unique module from which this binding was imported
+ jl_module_t *debug_only_import_from;
+ // If non-null, the unique binding imported. For PARTITION_KIND_IMPLICIT_GLOBAL, always matches binding_or_const.
+ // Must have trust_cache = 0.
+ jl_binding_t *debug_only_ultimate_binding;
+};
+
+static size_t WORLDMAX(size_t a, size_t b) { return a > b ? a : b; }
+static size_t WORLDMIN(size_t a, size_t b) { return a > b ? b : a; }
+
+static void update_implicit_resolution(struct implicit_search_resolution *to_update, struct implicit_search_resolution resolution)
+{
+ to_update->min_world = WORLDMAX(to_update->min_world, resolution.min_world);
+ to_update->max_world = WORLDMIN(to_update->max_world, resolution.max_world);
+ to_update->saw_cycle |= resolution.saw_cycle;
+ if (resolution.ultimate_kind == PARTITION_FAKE_KIND_CYCLE) {
+ // Cycles get ignored. This causes the resolution to only be partial, so we can't
+ // cache it. This gets tracked in saw_cycle.
+ to_update->saw_cycle = 1;
+ return;
+ }
+ if (resolution.ultimate_kind == PARTITION_KIND_GUARD) {
+ // Ignore guard imports
+ return;
+ }
+ if (to_update->ultimate_kind == PARTITION_KIND_GUARD) {
+ assert(resolution.binding_or_const);
+ to_update->ultimate_kind = resolution.ultimate_kind;
+ to_update->binding_or_const = resolution.binding_or_const;
+ to_update->debug_only_import_from = resolution.debug_only_import_from;
+ to_update->debug_only_ultimate_binding = resolution.debug_only_ultimate_binding;
+ return;
+ }
+ if (resolution.ultimate_kind == to_update->ultimate_kind &&
+ resolution.binding_or_const == to_update->binding_or_const) {
+ if (resolution.debug_only_import_from != to_update->debug_only_import_from) {
+ to_update->debug_only_import_from = NULL;
+ }
+ if (resolution.debug_only_ultimate_binding != to_update->debug_only_ultimate_binding) {
+ to_update->debug_only_ultimate_binding = NULL;
+ }
+ return;
+ }
+ to_update->ultimate_kind = PARTITION_KIND_FAILED;
+ to_update->binding_or_const = NULL;
+ to_update->debug_only_import_from = NULL;
+ to_update->debug_only_ultimate_binding = NULL;
+}
+
+static jl_binding_partition_t *jl_implicit_import_resolved(jl_binding_t *b, struct implicit_search_gap gap, struct implicit_search_resolution resolution)
+{
+ size_t new_kind = resolution.ultimate_kind | gap.inherited_flags;
+ size_t new_max_world = gap.max_world < resolution.max_world ? gap.max_world : resolution.max_world;
+ size_t new_min_world = gap.min_world > resolution.min_world ? gap.min_world : resolution.min_world;
+ jl_binding_partition_t *next = gap.replace;
+ if (jl_is_binding_partition(gap.parent)) {
+ // Check if we can merge this into the previous binding partition
+ jl_binding_partition_t *prev = (jl_binding_partition_t *)gap.parent;
+ assert(new_max_world != ~(size_t)0); // It is inconsistent to have a gap with `gap.parent` set, but max_world == ~(size_t)0
+ size_t expected_prev_min_world = new_max_world + 1;
+ if (prev->restriction == resolution.binding_or_const && prev->kind == new_kind) {
+retry:
+ if (!jl_atomic_cmpswap(&prev->min_world, &expected_prev_min_world, new_min_world)) {
+ if (expected_prev_min_world <= new_min_world) {
+ return prev;
+ }
+ else if (expected_prev_min_world <= new_max_world) {
+ // Concurrent modification of the partition. However, our lookup is still valid,
+ // so we should still be able to extend the partition.
+ goto retry;
+ }
+ // There remains a gap - proceed
+ } else {
+ if (next) {
+ size_t next_min_world = jl_atomic_load_relaxed(&next->min_world);
+ expected_prev_min_world = new_min_world;
+ for (;;) {
+ // We've updated the previous partition - check if we've closed a gap
+ size_t next_max_world = jl_atomic_load_relaxed(&next->max_world);
+ if (next_max_world >= expected_prev_min_world-1 && next->kind == new_kind && next->restriction == resolution.binding_or_const) {
+ if (jl_atomic_cmpswap(&prev->min_world, &expected_prev_min_world, next_min_world)) {
+ jl_binding_partition_t *nextnext = jl_atomic_load_relaxed(&next->next);
+ if (!jl_atomic_cmpswap(&prev->next, &next, nextnext)) {
+ // `next` may have been merged into its subsequent partition - we need to retry
+ assert(next);
+ continue;
+ }
+ // N.B.: This can lose modifications to next->{min_world, next}.
+ // However, those modifications could only have been for another implicit
+ // partition, so we are ok to lose them and recompute them later if necessary.
+ }
+ assert(expected_prev_min_world <= new_min_world);
+ }
+ break;
+ }
+ }
+ return prev;
+ }
+ }
+ }
+ jl_binding_partition_t *new_bpart = new_binding_partition();
+ jl_atomic_store_relaxed(&new_bpart->max_world, new_max_world);
+ new_bpart->kind = new_kind;
+ new_bpart->restriction = resolution.binding_or_const;
+ jl_gc_wb_fresh(new_bpart, new_bpart->restriction);
+
+ if (next) {
+ // See if we can merge the next partition into this one
+ size_t next_max_world = jl_atomic_load_relaxed(&next->max_world);
+ if (next_max_world == new_min_world - 1 && next->kind == new_kind && next->restriction == resolution.binding_or_const) {
+ // See above for potentially losing modifications to next.
+ new_min_world = jl_atomic_load_acquire(&next->min_world);
+ next = jl_atomic_load_relaxed(&next->next);
+ }
+ }
+
+ jl_atomic_store_relaxed(&new_bpart->min_world, new_min_world);
+ jl_atomic_store_relaxed(&new_bpart->next, next);
+ if (!jl_atomic_cmpswap(gap.insert, &gap.replace, new_bpart))
return NULL;
- assert(jl_is_binding(b));
- jl_value_t *parent = (jl_value_t*)b;
- _Atomic(jl_binding_partition_t *)*insert = &b->partitions;
- jl_binding_partition_t *bpart = jl_atomic_load_relaxed(insert);
- size_t max_world = (size_t)-1;
- jl_binding_partition_t *new_bpart = NULL;
+ jl_gc_wb(gap.parent, new_bpart);
+ return new_bpart;
+}
+
+// find a binding from a module's `usings` list
+struct implicit_search_resolution jl_resolve_implicit_import(jl_binding_t *b, modstack_t *st, size_t world, int trust_cache)
+{
+ // First check if we've hit a cycle in this resolution
+ {
+ modstack_t *tmp = st;
+ for (; tmp != NULL; tmp = tmp->prev) {
+ if (tmp->b == b) {
+ return (struct implicit_search_resolution){ PARTITION_FAKE_KIND_CYCLE, NULL, 0, ~(size_t)0, 1, NULL, NULL };
+ }
+ }
+ }
+
+ jl_module_t *m = b->globalref->mod;
+ jl_sym_t *var = b->globalref->name;
+
+ modstack_t top = { b, st };
+ struct implicit_search_resolution impstate;
+ struct implicit_search_resolution depimpstate;
+ size_t min_world = 0;
+ size_t max_world = ~(size_t)0;
+ impstate = depimpstate = (struct implicit_search_resolution){ PARTITION_KIND_GUARD, NULL, min_world, max_world, 0, NULL, NULL };
+
+ JL_LOCK(&m->lock);
+ int i = (int)module_usings_length(m) - 1;
+ JL_UNLOCK(&m->lock);
+ for (; i >= 0 && impstate.ultimate_kind != PARTITION_KIND_FAILED; --i) {
+ JL_LOCK(&m->lock);
+ struct _jl_module_using data = *module_usings_getidx(m, i);
+ JL_UNLOCK(&m->lock);
+ if (data.min_world > world) {
+ max_world = WORLDMIN(max_world, data.min_world - 1);
+ continue;
+ }
+ if (data.max_world < world) {
+ min_world = WORLDMAX(min_world, data.max_world + 1);
+ continue;
+ }
+
+ min_world = WORLDMAX(min_world, data.min_world);
+ max_world = WORLDMIN(max_world, data.max_world);
+
+ jl_module_t *imp = data.mod;
+ JL_GC_PROMISE_ROOTED(imp);
+ jl_binding_t *tempb = jl_get_module_binding(imp, var, 0);
+ if (!tempb) {
+ // If the binding has never been allocated, it could not have been marked exported, so
+ // it is irrelevant for our resolution. We can move on.
+ continue;
+ }
+
+ struct implicit_search_gap gap;
+ jl_binding_partition_t *tempbpart = jl_get_binding_partition_if_present(tempb, world, &gap);
+ size_t tempbpart_flags = tempbpart ? (tempbpart->kind & PARTITION_MASK_FLAG) : gap.inherited_flags;
+
+ while (tempbpart && jl_bkind_is_some_explicit_import(jl_binding_kind(tempbpart))) {
+ max_world = WORLDMIN(max_world, jl_atomic_load_relaxed(&tempbpart->max_world));
+ min_world = WORLDMAX(min_world, jl_atomic_load_relaxed(&tempbpart->min_world));
+
+ tempb = (jl_binding_t*)tempbpart->restriction;
+ tempbpart = jl_get_binding_partition_if_present(tempb, world, &gap);
+ }
+
+ int tempbpart_valid = tempbpart && (trust_cache || !jl_bkind_is_some_implicit(jl_binding_kind(tempbpart)));
+ size_t tembppart_max_world = tempbpart_valid ? jl_atomic_load_relaxed(&tempbpart->max_world) : gap.max_world;
+ size_t tembppart_min_world = tempbpart ? WORLDMAX(jl_atomic_load_relaxed(&tempbpart->min_world), gap.min_world) : gap.min_world;
+
+ max_world = WORLDMIN(max_world, tembppart_max_world);
+ min_world = WORLDMAX(min_world, tembppart_min_world);
+
+ if (!(tempbpart_flags & PARTITION_FLAG_EXPORTED)) {
+ // Partition not exported - skip.
+ continue;
+ }
+
+ struct implicit_search_resolution *comparison = &impstate;
+ if (impstate.ultimate_kind != PARTITION_KIND_GUARD) {
+ if (tempbpart_flags & PARTITION_FLAG_DEPRECATED) {
+ // Deprecated, but we already have a non-deprecated binding for this - skip.
+ continue;
+ }
+ } else if (tempbpart_flags & PARTITION_FLAG_DEPRECATED) {
+ if (depimpstate.ultimate_kind == PARTITION_KIND_FAILED) {
+ // We've already decided that the deprecated bindings are ambiguous, so skip this, but
+ // keep going to look for non-deprecated bindings.
+ continue;
+ }
+ comparison = &depimpstate;
+ }
+
+ struct implicit_search_resolution imp_resolution = { PARTITION_KIND_GUARD, NULL, min_world, max_world, 0, NULL, NULL };
+ if (!tempbpart_valid) {
+ imp_resolution = jl_resolve_implicit_import(tempb, &top, world, trust_cache);
+ } else {
+ enum jl_partition_kind kind = jl_binding_kind(tempbpart);
+ if (kind == PARTITION_KIND_IMPLICIT_GLOBAL) {
+ imp_resolution.binding_or_const = tempbpart->restriction;
+ imp_resolution.debug_only_ultimate_binding = (jl_binding_t*)tempbpart->restriction;
+ imp_resolution.ultimate_kind = PARTITION_KIND_IMPLICIT_GLOBAL;
+ } else if (kind == PARTITION_KIND_GLOBAL || kind == PARTITION_KIND_DECLARED || kind == PARTITION_KIND_BACKDATED_CONST) {
+ imp_resolution.binding_or_const = (jl_value_t *)tempb;
+ imp_resolution.debug_only_ultimate_binding = tempb;
+ imp_resolution.ultimate_kind = PARTITION_KIND_IMPLICIT_GLOBAL;
+ } else if (jl_bkind_is_defined_constant(kind)) {
+ assert(tempbpart->restriction);
+ imp_resolution.binding_or_const = tempbpart->restriction;
+ imp_resolution.debug_only_ultimate_binding = tempb;
+ imp_resolution.ultimate_kind = PARTITION_KIND_IMPLICIT_CONST;
+ }
+ }
+ imp_resolution.debug_only_import_from = imp;
+ update_implicit_resolution(comparison, imp_resolution);
+
+ if (!tempbpart && !imp_resolution.saw_cycle) {
+ // Independent of whether or not we trust the cache, we have independently computed the implicit resolution
+ // for this import, so we can put it in the cache.
+ jl_implicit_import_resolved(tempb, gap, imp_resolution);
+ }
+ }
+
+ if (impstate.ultimate_kind == PARTITION_KIND_GUARD && depimpstate.ultimate_kind != PARTITION_KIND_GUARD) {
+ depimpstate.min_world = WORLDMAX(depimpstate.min_world, min_world);
+ depimpstate.max_world = WORLDMIN(depimpstate.max_world, max_world);
+ return depimpstate;
+ }
+ impstate.min_world = WORLDMAX(impstate.min_world, min_world);
+ impstate.max_world = WORLDMIN(impstate.max_world, max_world);
+ return impstate;
+}
+
+JL_DLLEXPORT jl_binding_partition_t *jl_maybe_reresolve_implicit(jl_binding_t *b, size_t new_max_world)
+{
+ struct implicit_search_gap gap;
while (1) {
- while (bpart && world < bpart->min_world) {
- insert = &bpart->next;
- max_world = bpart->min_world - 1;
- parent = (jl_value_t *)bpart;
- bpart = jl_atomic_load_relaxed(&bpart->next);
+ jl_binding_partition_t *bpart = jl_get_binding_partition_if_present(b, new_max_world+1, &gap);
+ assert(bpart == jl_atomic_load_relaxed(&b->partitions));
+ assert(bpart);
+ struct implicit_search_resolution resolution = jl_resolve_implicit_import(b, NULL, new_max_world+1, 0);
+ int resolution_unchanged = bpart->restriction == resolution.binding_or_const && jl_binding_kind(bpart) == resolution.ultimate_kind;
+ size_t bpart_min_world = jl_atomic_load_relaxed(&bpart->min_world);
+ if (resolution.min_world == bpart_min_world) {
+ // The resolution has the same world bounds - it must be unchanged
+ assert(resolution_unchanged);
+ return bpart;
+ } else if (resolution_unchanged) {
+ // If the resolution is unchanged, we can still keep the bpart
+ assert(resolution.min_world > bpart_min_world);
+ return bpart;
+ }
+ assert(resolution.min_world == new_max_world+1 && "Missed an invalidation or bad resolution bounds");
+ size_t expected_max_world = ~(size_t)0;
+ if (jl_atomic_cmpswap(&bpart->max_world, &expected_max_world, new_max_world))
+ {
+ gap.min_world = new_max_world+1;
+ gap.inherited_flags = bpart->kind & PARTITION_MASK_FLAG;
+ jl_binding_partition_t *new_bpart = jl_implicit_import_resolved(b, gap, resolution);
+ if (new_bpart)
+ return new_bpart;
}
- if (bpart && world <= jl_atomic_load_relaxed(&bpart->max_world))
+ }
+}
+
+JL_DLLEXPORT void jl_update_loaded_bpart(jl_binding_t *b, jl_binding_partition_t *bpart)
+{
+ struct implicit_search_resolution resolution = jl_resolve_implicit_import(b, NULL, jl_atomic_load_relaxed(&jl_world_counter), 0);
+ jl_atomic_store_relaxed(&bpart->min_world, resolution.min_world);
+ jl_atomic_store_relaxed(&bpart->max_world, resolution.max_world);
+ bpart->restriction = resolution.binding_or_const;
+ bpart->kind = resolution.ultimate_kind;
+}
+
+STATIC_INLINE jl_binding_partition_t *jl_get_binding_partition_(jl_binding_t *b JL_PROPAGATES_ROOT, jl_value_t *parent, _Atomic(jl_binding_partition_t *)*insert, size_t world, size_t max_world, modstack_t *st) JL_GLOBALLY_ROOTED
+{
+ assert(jl_is_binding(b));
+ struct implicit_search_gap gap;
+ gap.parent = parent;
+ gap.insert = insert;
+ gap.inherited_flags = 0;
+ gap.min_world = 0;
+ gap.max_world = max_world;
+ while (1) {
+ gap.replace = jl_atomic_load_relaxed(gap.insert);
+ jl_binding_partition_t *bpart = jl_get_binding_partition__(b, world, &gap);
+ if (bpart)
return bpart;
- if (!new_bpart)
- new_bpart = new_binding_partition();
- jl_atomic_store_relaxed(&new_bpart->next, bpart);
- jl_gc_wb_fresh(new_bpart, bpart);
- new_bpart->min_world = bpart ? jl_atomic_load_relaxed(&bpart->max_world) + 1 : 0;
- jl_atomic_store_relaxed(&new_bpart->max_world, max_world);
- if (jl_atomic_cmpswap(insert, &bpart, new_bpart)) {
- jl_gc_wb(parent, new_bpart);
+ struct implicit_search_resolution resolution = jl_resolve_implicit_import(b, NULL, world, 1);
+ jl_binding_partition_t *new_bpart = jl_implicit_import_resolved(b, gap, resolution);
+ if (new_bpart)
return new_bpart;
- }
}
}
+jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) {
+ if (!b)
+ return NULL;
+ // Duplicate the code for the entry frame for branch prediction
+ return jl_get_binding_partition_(b, (jl_value_t*)b, &b->partitions, world, ~(size_t)0, NULL);
+}
+
+jl_binding_partition_t *jl_get_binding_partition_with_hint(jl_binding_t *b, jl_binding_partition_t *prev, size_t world) JL_GLOBALLY_ROOTED {
+ // Helper for getting a binding partition for an older world after we've already looked up the partition for a newer world
+ assert(b);
+ size_t prev_min_world = jl_atomic_load_relaxed(&prev->min_world);
+ return jl_get_binding_partition_(b, (jl_value_t*)prev, &prev->next, world, prev_min_world-1, NULL);
+}
+
jl_binding_partition_t *jl_get_binding_partition_all(jl_binding_t *b, size_t min_world, size_t max_world) {
if (!b)
return NULL;
@@ -71,7 +419,58 @@ jl_binding_partition_t *jl_get_binding_partition_all(jl_binding_t *b, size_t min
return bpart;
}
-JL_DLLEXPORT jl_module_t *jl_new_module_(jl_sym_t *name, jl_module_t *parent, uint8_t default_names)
+JL_DLLEXPORT int jl_get_binding_leaf_partitions_restriction_kind(jl_binding_t *b JL_PROPAGATES_ROOT, struct restriction_kind_pair *rkp, size_t min_world, size_t max_world) {
+ if (!b)
+ return 0;
+
+ int first = 1;
+ size_t validated_min_world = max_world == ~(size_t)0 ? ~(size_t)0 : max_world + 1;
+ jl_binding_partition_t *bpart = NULL;
+ int maybe_depwarn = 0;
+ while (validated_min_world > min_world) {
+ bpart = bpart ? jl_get_binding_partition_with_hint(b, bpart, validated_min_world - 1) :
+ jl_get_binding_partition(b, validated_min_world - 1);
+ size_t bpart_min_world = jl_atomic_load_relaxed(&bpart->min_world);
+ while (validated_min_world > min_world && validated_min_world > bpart_min_world) {
+ jl_binding_t *curb = b;
+ jl_binding_partition_t *curbpart = bpart;
+ size_t cur_min_world = bpart_min_world;
+ size_t cur_max_world = validated_min_world - 1;
+ jl_walk_binding_inplace_worlds(&curb, &curbpart, &cur_min_world, &cur_max_world, &maybe_depwarn, cur_max_world);
+ enum jl_partition_kind kind = jl_binding_kind(curbpart);
+ if (kind == PARTITION_KIND_IMPLICIT_CONST)
+ kind = PARTITION_KIND_CONST;
+ if (first == 1) {
+ rkp->kind = kind;
+ rkp->restriction = curbpart->restriction;
+ if (rkp->kind == PARTITION_KIND_GLOBAL || rkp->kind == PARTITION_KIND_DECLARED)
+ rkp->binding_if_global = curb;
+ first = 0;
+ } else {
+ if (kind != rkp->kind || curbpart->restriction != rkp->restriction)
+ return 0;
+ if ((rkp->kind == PARTITION_KIND_GLOBAL || rkp->kind == PARTITION_KIND_DECLARED) && rkp->binding_if_global != curb)
+ return 0;
+ }
+ validated_min_world = cur_min_world;
+ }
+ }
+ rkp->maybe_depwarn = maybe_depwarn;
+ return 1;
+}
+
+JL_DLLEXPORT jl_value_t *jl_get_binding_leaf_partitions_value_if_const(jl_binding_t *b JL_PROPAGATES_ROOT, int *maybe_depwarn, size_t min_world, size_t max_world) {
+ struct restriction_kind_pair rkp = { NULL, NULL, PARTITION_KIND_GUARD, 0 };
+ if (!jl_get_binding_leaf_partitions_restriction_kind(b, &rkp, min_world, max_world))
+ return NULL;
+ if (jl_bkind_is_some_constant(rkp.kind) && rkp.kind != PARTITION_KIND_BACKDATED_CONST) {
+ *maybe_depwarn = rkp.maybe_depwarn;
+ return rkp.restriction;
+ }
+ return NULL;
+}
+
+static jl_module_t *jl_new_module__(jl_sym_t *name, jl_module_t *parent)
{
jl_task_t *ct = jl_current_task;
const jl_uuid_t uuid_zero = {0, 0};
@@ -80,21 +479,24 @@ JL_DLLEXPORT jl_module_t *jl_new_module_(jl_sym_t *name, jl_module_t *parent, ui
jl_set_typetagof(m, jl_module_tag, 0);
assert(jl_is_symbol(name));
m->name = name;
- m->parent = parent;
+ m->parent = parent ? parent : m;
m->istopmod = 0;
m->uuid = uuid_zero;
static unsigned int mcounter; // simple counter backup, in case hrtime is not incrementing
- m->build_id.lo = jl_hrtime() + (++mcounter);
+ // TODO: this is used for ir decompression and is liable to hash collisions so use more of the bits
+ m->build_id.lo = bitmix(jl_hrtime() + (++mcounter), jl_rand());
if (!m->build_id.lo)
m->build_id.lo++; // build id 0 is invalid
m->build_id.hi = ~(uint64_t)0;
jl_atomic_store_relaxed(&m->counter, 1);
+ m->usings_backedges = jl_nothing;
+ m->scanned_methods = jl_nothing;
m->nospecialize = 0;
m->optlevel = -1;
m->compile = -1;
m->infer = -1;
m->max_methods = -1;
- m->file = name; // Using the name as a placeholder is better than nothing
+ m->file = jl_empty_sym;
m->line = 0;
m->hash = parent == NULL ? bitmix(name->hash, jl_module_type->hash) :
bitmix(name->hash, parent->hash);
@@ -102,20 +504,127 @@ JL_DLLEXPORT jl_module_t *jl_new_module_(jl_sym_t *name, jl_module_t *parent, ui
jl_atomic_store_relaxed(&m->bindings, jl_emptysvec);
jl_atomic_store_relaxed(&m->bindingkeyset, (jl_genericmemory_t*)jl_an_empty_memory_any);
arraylist_new(&m->usings, 0);
- if (jl_core_module && default_names) {
- JL_GC_PUSH1(&m);
- jl_module_using(m, jl_core_module);
- // export own name, so "using Foo" makes "Foo" itself visible
- jl_set_const(m, name, (jl_value_t*)m);
- jl_module_public(m, name, 1);
- JL_GC_POP();
+ return m;
+}
+
+static void jl_add_default_names(jl_module_t *m, uint8_t default_using_core, uint8_t self_name)
+{
+ if (jl_core_module) {
+ // Bootstrap: Before jl_core_module is defined, we don't have enough infrastructure
+ // for bindings, so Core itself gets special handling in jltypes.c
+ if (default_using_core) {
+ jl_module_initial_using(m, jl_core_module);
+ }
+ if (self_name) {
+ // export own name, so "using Foo" makes "Foo" itself visible
+ jl_set_initial_const(m, m->name, (jl_value_t*)m, 1);
+ }
}
+}
+
+jl_module_t *jl_new_module_(jl_sym_t *name, jl_module_t *parent, uint8_t default_using_core, uint8_t self_name)
+{
+ jl_module_t *m = jl_new_module__(name, parent);
+ JL_GC_PUSH1(&m);
+ jl_add_default_names(m, default_using_core, self_name);
+ JL_GC_POP();
return m;
}
+
+// Precondition: world_counter_lock is held
+JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val3(
+ jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *val,
+ enum jl_partition_kind constant_kind, size_t new_world)
+{
+ jl_binding_partition_t *new_prev_bpart = NULL;
+ JL_GC_PUSH2(&val, &new_prev_bpart);
+ if (!b) {
+ b = jl_get_module_binding(mod, var, 1);
+ }
+ jl_binding_partition_t *new_bpart = NULL;
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, new_world);
+ while (!new_bpart) {
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ if (jl_bkind_is_some_constant(kind) && !jl_bkind_is_some_implicit(kind)) {
+ if (!val) {
+ new_bpart = bpart;
+ break;
+ }
+ jl_value_t *old = bpart->restriction;
+ JL_GC_PROMISE_ROOTED(old);
+ if (val == old || (val && old && jl_egal(val, old))) {
+ new_bpart = bpart;
+ break;
+ }
+ } else if (jl_bkind_is_some_explicit_import(kind)) {
+ jl_errorf("cannot declare %s.%s constant; it was already declared as an import",
+ jl_symbol_name(mod->name), jl_symbol_name(var));
+ } else if (kind == PARTITION_KIND_GLOBAL) {
+ jl_errorf("cannot declare %s.%s constant; it was already declared global",
+ jl_symbol_name(mod->name), jl_symbol_name(var));
+ }
+ if (jl_atomic_load_relaxed(&bpart->min_world) == new_world) {
+ bpart->kind = constant_kind | (bpart->kind & PARTITION_MASK_FLAG);
+ bpart->restriction = val;
+ if (val)
+ jl_gc_wb(bpart, val);
+ new_bpart = bpart;
+ } else {
+ new_bpart = jl_replace_binding_locked(b, bpart, val, constant_kind, new_world);
+ }
+ int need_backdate = new_world && val;
+ if (need_backdate) {
+ // We will backdate as long as this partition was never explicitly
+ // declared const, global, or imported.
+ jl_binding_partition_t *prev_bpart = bpart;
+ for (;;) {
+ enum jl_partition_kind prev_kind = jl_binding_kind(prev_bpart);
+ if (jl_bkind_is_some_constant(prev_kind) || prev_kind == PARTITION_KIND_GLOBAL ||
+ (jl_bkind_is_some_import(prev_kind))) {
+ need_backdate = 0;
+ break;
+ }
+ size_t prev_bpart_min_world = jl_atomic_load_relaxed(&prev_bpart->min_world);
+ if (prev_bpart_min_world == 0)
+ break;
+ prev_bpart = jl_get_binding_partition(b, prev_bpart_min_world - 1);
+ }
+ }
+ // If backdate is required, replace each existing partition by a new one.
+ // We can't use one binding to cover the entire range, because we need to
+ // keep the flags partitioned.
+ if (need_backdate) {
+ jl_binding_partition_t *prev_bpart = bpart;
+ jl_binding_partition_t *backdate_bpart = new_binding_partition();
+ new_prev_bpart = backdate_bpart;
+ while (1) {
+ backdate_bpart->kind = (size_t)PARTITION_KIND_BACKDATED_CONST | (prev_bpart->kind & 0xf0);
+ backdate_bpart->restriction = val;
+ jl_atomic_store_relaxed(&backdate_bpart->min_world,
+ jl_atomic_load_relaxed(&prev_bpart->min_world));
+ jl_gc_wb_fresh(backdate_bpart, val);
+ jl_atomic_store_relaxed(&backdate_bpart->max_world,
+ jl_atomic_load_relaxed(&prev_bpart->max_world));
+ prev_bpart = jl_atomic_load_relaxed(&prev_bpart->next);
+ if (!prev_bpart)
+ break;
+ jl_binding_partition_t *next_prev_bpart = new_binding_partition();
+ jl_atomic_store_relaxed(&backdate_bpart->next, next_prev_bpart);
+ jl_gc_wb(backdate_bpart, next_prev_bpart);
+ backdate_bpart = next_prev_bpart;
+ }
+ jl_atomic_store_release(&new_bpart->next, new_prev_bpart);
+ jl_gc_wb(new_bpart, new_prev_bpart);
+ }
+ }
+ JL_GC_POP();
+ return new_bpart;
+}
+
JL_DLLEXPORT jl_module_t *jl_new_module(jl_sym_t *name, jl_module_t *parent)
{
- return jl_new_module_(name, parent, 1);
+ return jl_new_module_(name, parent, 1, 1);
}
uint32_t jl_module_next_counter(jl_module_t *m)
@@ -127,7 +636,7 @@ JL_DLLEXPORT jl_value_t *jl_f_new_module(jl_sym_t *name, uint8_t std_imports, ui
{
// TODO: should we prohibit this during incremental compilation?
// TODO: the parent module is a lie
- jl_module_t *m = jl_new_module_(name, jl_main_module, default_names);
+ jl_module_t *m = jl_new_module_(name, jl_main_module, default_names, default_names);
JL_GC_PUSH1(&m);
if (std_imports)
jl_add_standard_imports(m);
@@ -239,10 +748,7 @@ static jl_binding_t *new_binding(jl_module_t *mod, jl_sym_t *name)
jl_atomic_store_relaxed(&b->partitions, NULL);
b->globalref = NULL;
b->backedges = NULL;
- b->exportp = 0;
- b->publicp = 0;
- b->deprecated = 0;
- b->did_print_backdate_admonition = 0;
+ jl_atomic_store_relaxed(&b->flags, 0);
JL_GC_PUSH1(&b);
b->globalref = jl_new_globalref(mod, name, b);
jl_gc_wb(b, b->globalref);
@@ -252,58 +758,62 @@ static jl_binding_t *new_binding(jl_module_t *mod, jl_sym_t *name)
extern jl_mutex_t jl_modules_mutex;
-extern void check_safe_newbinding(jl_module_t *m, jl_sym_t *var)
+static int is_module_open(jl_module_t *m)
{
- if (jl_current_task->ptls->in_pure_callback)
- jl_errorf("new globals cannot be created in a generated function");
- if (jl_options.incremental && jl_generating_output()) {
- JL_LOCK(&jl_modules_mutex);
- int open = ptrhash_has(&jl_current_modules, (void*)m);
- if (!open && jl_module_init_order != NULL) {
- size_t i, l = jl_array_len(jl_module_init_order);
- for (i = 0; i < l; i++) {
- if (m == (jl_module_t*)jl_array_ptr_ref(jl_module_init_order, i)) {
- open = 1;
- break;
- }
+ JL_LOCK(&jl_modules_mutex);
+ int open = ptrhash_has(&jl_current_modules, (void*)m);
+ if (!open && jl_module_init_order != NULL) {
+ size_t i, l = jl_array_len(jl_module_init_order);
+ for (i = 0; i < l; i++) {
+ if (m == (jl_module_t*)jl_array_ptr_ref(jl_module_init_order, i)) {
+ open = 1;
+ break;
}
}
- JL_UNLOCK(&jl_modules_mutex);
- if (!open) {
- jl_errorf("Creating a new global in closed module `%s` (`%s`) breaks incremental compilation "
- "because the side effects will not be permanent.",
- jl_symbol_name(m->name), jl_symbol_name(var));
- }
+ }
+ JL_UNLOCK(&jl_modules_mutex);
+ return open;
+}
+
+extern void check_safe_newbinding(jl_module_t *m, jl_sym_t *var)
+{
+ if (jl_current_task->ptls->in_pure_callback)
+ jl_errorf("new strong globals cannot be created in a generated function. Declare them outside using `global x::Any`.");
+ if (jl_options.incremental && jl_generating_output() && !is_module_open(m)) {
+ jl_errorf("Creating a new global in closed module `%s` (`%s`) breaks incremental compilation "
+ "because the side effects will not be permanent.",
+ jl_symbol_name(m->name), jl_symbol_name(var));
}
}
-static jl_module_t *jl_binding_dbgmodule(jl_binding_t *b, jl_module_t *m, jl_sym_t *var) JL_GLOBALLY_ROOTED;
+static jl_module_t *jl_binding_dbgmodule(jl_binding_t *b) JL_GLOBALLY_ROOTED;
// Checks that the binding in general is currently writable, but does not perform any checks on the
// value to be written into the binding.
JL_DLLEXPORT void jl_check_binding_currently_writable(jl_binding_t *b, jl_module_t *m, jl_sym_t *s)
{
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
-retry:
- if (decode_restriction_kind(pku) != BINDING_KIND_GLOBAL && !jl_bkind_is_some_constant(decode_restriction_kind(pku))) {
- if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) {
- if (decode_restriction_kind(pku) != BINDING_KIND_DECLARED) {
- jl_errorf("Global %s.%s does not exist and cannot be assigned.\n"
- "Note: Julia 1.9 and 1.10 inadvertently omitted this error check (#56933).\n"
- "Hint: Declare it using `global %s` inside `%s` before attempting assignment.",
- jl_symbol_name(m->name), jl_symbol_name(s),
- jl_symbol_name(s), jl_symbol_name(m->name));
- }
- jl_ptr_kind_union_t new_pku = encode_restriction((jl_value_t*)jl_any_type, BINDING_KIND_GLOBAL);
- if (!jl_atomic_cmpswap(&bpart->restriction, &pku, new_pku))
- goto retry;
- jl_gc_wb_knownold(bpart, jl_any_type);
- } else {
- jl_module_t *from = jl_binding_dbgmodule(b, m, s);
- if (from == m)
+ if (jl_options.depwarn && (bpart->kind & PARTITION_FLAG_DEPWARN)) {
+ jl_binding_deprecation_warning(b);
+ }
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ if (kind != PARTITION_KIND_GLOBAL && kind != PARTITION_KIND_DECLARED) {
+ if (jl_bkind_is_some_guard(kind)) {
+ jl_errorf("Global %s.%s does not exist and cannot be assigned.\n"
+ "Note: Julia 1.9 and 1.10 inadvertently omitted this error check (#56933).\n"
+ "Hint: Declare it using `global %s` inside `%s` before attempting assignment.",
+ jl_symbol_name(m->name), jl_symbol_name(s),
+ jl_symbol_name(s), jl_symbol_name(m->name));
+ }
+ else if (jl_bkind_is_some_constant(kind) && kind != PARTITION_KIND_IMPLICIT_CONST) {
+ jl_errorf("invalid assignment to constant %s.%s. This redefinition may be permitted using the `const` keyword.",
+ jl_symbol_name(m->name), jl_symbol_name(s));
+ }
+ else {
+ jl_module_t *from = jl_binding_dbgmodule(b);
+ if (from == m || !from)
jl_errorf("cannot assign a value to imported variable %s.%s",
- jl_symbol_name(from->name), jl_symbol_name(s));
+ jl_symbol_name(m->name), jl_symbol_name(s));
else
jl_errorf("cannot assign a value to imported variable %s.%s from module %s",
jl_symbol_name(from->name), jl_symbol_name(s), jl_symbol_name(m->name));
@@ -321,337 +831,251 @@ JL_DLLEXPORT jl_binding_t *jl_get_binding_wr(jl_module_t *m JL_PROPAGATES_ROOT,
// return module of binding
JL_DLLEXPORT jl_module_t *jl_get_module_of_binding(jl_module_t *m, jl_sym_t *var)
{
- jl_binding_t *b = jl_get_binding(m, var);
- if (b == NULL)
- return NULL;
- return b->globalref->mod; // TODO: deprecate this?
+ jl_binding_t *b = jl_get_module_binding(m, var, 1);
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
+ jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
+ if (jl_binding_kind(bpart) == PARTITION_KIND_IMPLICIT_CONST) {
+ struct implicit_search_resolution resolution = jl_resolve_implicit_import(b, NULL, jl_current_task->world_age, 0);
+ if (!resolution.debug_only_ultimate_binding)
+ jl_error("Constant binding was imported from multiple modules");
+ b = resolution.debug_only_ultimate_binding;
+ }
+ return b ? b->globalref->mod : m;
}
static NOINLINE void print_backdate_admonition(jl_binding_t *b) JL_NOTSAFEPOINT
{
+ if (jl_options.depwarn == JL_OPTIONS_DEPWARN_ERROR)
+ jl_undefined_var_error(b->globalref->name, (jl_value_t*)b->globalref->mod);
jl_safe_printf(
"WARNING: Detected access to binding `%s.%s` in a world prior to its definition world.\n"
" Julia 1.12 has introduced more strict world age semantics for global bindings.\n"
" !!! This code may malfunction under Revise.\n"
" !!! This code will error in future versions of Julia.\n"
- "Hint: Add an appropriate `invokelatest` around the access to this binding.\n",
+ "Hint: Add an appropriate `invokelatest` around the access to this binding.\n"
+ "To make this warning an error, and hence obtain a stack trace, use `julia --depwarn=error`.\n",
jl_symbol_name(b->globalref->mod->name), jl_symbol_name(b->globalref->name));
- b->did_print_backdate_admonition = 1;
}
static inline void check_backdated_binding(jl_binding_t *b, enum jl_partition_kind kind) JL_NOTSAFEPOINT
{
- if (__unlikely(kind == BINDING_KIND_BACKDATED_CONST) &&
- !b->did_print_backdate_admonition) {
+ if (__unlikely(kind == PARTITION_KIND_BACKDATED_CONST) &&
+ !(jl_atomic_fetch_or_relaxed(&b->flags, BINDING_FLAG_DID_PRINT_BACKDATE_ADMONITION) & BINDING_FLAG_DID_PRINT_BACKDATE_ADMONITION)) {
print_backdate_admonition(b);
}
}
JL_DLLEXPORT jl_value_t *jl_get_binding_value(jl_binding_t *b)
+{
+ return jl_get_binding_value_in_world(b, jl_current_task->world_age);
+}
+
+JL_DLLEXPORT jl_value_t *jl_get_binding_value_in_world(jl_binding_t *b, size_t world)
+{
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, world);
+ jl_walk_binding_inplace(&b, &bpart, world);
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ if (jl_bkind_is_some_guard(kind))
+ return NULL;
+ if (jl_bkind_is_some_constant(kind)) {
+ check_backdated_binding(b, kind);
+ return bpart->restriction;
+ }
+ assert(!jl_bkind_is_some_import(kind));
+ return jl_atomic_load_relaxed(&b->value);
+}
+
+JL_DLLEXPORT jl_value_t *jl_get_binding_value_depwarn(jl_binding_t *b)
{
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
- enum jl_partition_kind kind = decode_restriction_kind(pku);
+ if (jl_options.depwarn) {
+ int needs_depwarn = 0;
+ jl_walk_binding_inplace_depwarn(&b, &bpart, jl_current_task->world_age, &needs_depwarn);
+ if (needs_depwarn)
+ jl_binding_deprecation_warning(b);
+ } else {
+ jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
+ }
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
if (jl_bkind_is_some_guard(kind))
return NULL;
if (jl_bkind_is_some_constant(kind)) {
check_backdated_binding(b, kind);
- return decode_restriction_value(pku);
+ return bpart->restriction;
}
+ assert(!jl_bkind_is_some_import(kind));
return jl_atomic_load_relaxed(&b->value);
}
+
JL_DLLEXPORT jl_value_t *jl_get_binding_value_seqcst(jl_binding_t *b)
{
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
- enum jl_partition_kind kind = decode_restriction_kind(pku);
+ jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
if (jl_bkind_is_some_guard(kind))
return NULL;
if (jl_bkind_is_some_constant(kind)) {
check_backdated_binding(b, kind);
- return decode_restriction_value(pku);
+ return bpart->restriction;
}
+ assert(!jl_bkind_is_some_import(kind));
return jl_atomic_load(&b->value);
}
JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_const(jl_binding_t *b)
{
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
- enum jl_partition_kind kind = decode_restriction_kind(pku);
+ jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
if (jl_bkind_is_some_guard(kind))
return NULL;
if (!jl_bkind_is_some_constant(kind))
return NULL;
check_backdated_binding(b, kind);
- return decode_restriction_value(pku);
+ return bpart->restriction;
}
-JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved_and_const(jl_binding_t *b)
+JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_latest_resolved_and_const_debug_only(jl_binding_t *b)
{
// Unlike jl_get_binding_value_if_const this doesn't try to allocate new binding partitions if they
- // don't already exist, making this JL_NOTSAFEPOINT.
+ // don't already exist, making this JL_NOTSAFEPOINT. However, as a result, this may fail to return
+ // a value - even if one does exist. It should only be used for reflection/debugging when the integrity
+ // of the runtime is not guaranteed.
if (!b)
return NULL;
jl_binding_partition_t *bpart = jl_atomic_load_relaxed(&b->partitions);
if (!bpart)
return NULL;
size_t max_world = jl_atomic_load_relaxed(&bpart->max_world);
- if (bpart->min_world > jl_current_task->world_age || jl_current_task->world_age > max_world)
+ if (jl_atomic_load_relaxed(&bpart->min_world) > jl_current_task->world_age || jl_current_task->world_age > max_world)
return NULL;
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- enum jl_partition_kind kind = decode_restriction_kind(pku);
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
if (jl_bkind_is_some_guard(kind))
return NULL;
if (!jl_bkind_is_some_constant(kind))
return NULL;
check_backdated_binding(b, kind);
- return decode_restriction_value(pku);
+ return bpart->restriction;
}
-JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved(jl_binding_t *b)
+JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved_debug_only(jl_binding_t *b)
{
- // Unlike jl_get_binding_value this doesn't try to allocate new binding partitions if they
- // don't already exist, making this JL_NOTSAFEPOINT.
+ // See note above. Use for debug/reflection purposes only.
if (!b)
return NULL;
jl_binding_partition_t *bpart = jl_atomic_load_relaxed(&b->partitions);
if (!bpart)
return NULL;
size_t max_world = jl_atomic_load_relaxed(&bpart->max_world);
- if (bpart->min_world > jl_current_task->world_age || jl_current_task->world_age > max_world)
+ if (jl_atomic_load_relaxed(&bpart->min_world) > jl_current_task->world_age || jl_current_task->world_age > max_world)
return NULL;
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- enum jl_partition_kind kind = decode_restriction_kind(pku);
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
if (jl_bkind_is_some_guard(kind))
return NULL;
if (jl_bkind_is_some_import(kind))
return NULL;
if (jl_bkind_is_some_constant(kind)) {
check_backdated_binding(b, kind);
- return decode_restriction_value(pku);
+ return bpart->restriction;
}
return jl_atomic_load_relaxed(&b->value);
}
JL_DLLEXPORT jl_value_t *jl_bpart_get_restriction_value(jl_binding_partition_t *bpart)
{
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- jl_value_t *v = decode_restriction_value(pku);
+ jl_value_t *v = bpart->restriction;
if (!v)
jl_throw(jl_undefref_exception);
return v;
}
-typedef struct _modstack_t {
- jl_module_t *m;
- jl_sym_t *var;
- struct _modstack_t *prev;
-} modstack_t;
-static jl_binding_t *jl_resolve_owner(jl_binding_t *b/*optional*/, jl_module_t *m JL_PROPAGATES_ROOT, jl_sym_t *var, modstack_t *st, size_t world);
-
-JL_DLLEXPORT jl_value_t *jl_reresolve_binding_value_seqcst(jl_binding_t *b)
-{
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- if (jl_bkind_is_some_guard(decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)))) {
- jl_resolve_owner(b, b->globalref->mod, b->globalref->name, NULL, jl_current_task->world_age);
- }
- return jl_get_binding_value_seqcst(b);
-}
-
-// get binding for adding a method
-// like jl_get_binding_wr, but has different error paths and messages
-JL_DLLEXPORT jl_binding_t *jl_get_binding_for_method_def(jl_module_t *m, jl_sym_t *var)
-{
- jl_binding_t *b = jl_get_module_binding(m, var, 1);
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- if (decode_restriction_kind(pku) != BINDING_KIND_GLOBAL && !jl_bkind_is_some_constant(decode_restriction_kind(pku))) {
- if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) {
- if (decode_restriction_kind(pku) != BINDING_KIND_DECLARED) {
- check_safe_newbinding(m, var);
- }
- return b;
- }
- jl_value_t *f = jl_get_binding_value_if_const(b);
- if (f == NULL) {
- jl_module_t *from = jl_binding_dbgmodule(b, m, var);
- // we must have implicitly imported this with using, so call jl_binding_dbgmodule to try to get the name of the module we got this from
- jl_errorf("invalid method definition in %s: exported function %s.%s does not exist",
- jl_symbol_name(m->name), jl_symbol_name(from->name), jl_symbol_name(var));
- }
- // TODO: we might want to require explicitly importing types to add constructors
- // or we might want to drop this error entirely
- if (decode_restriction_kind(pku) != BINDING_KIND_IMPORTED && !(f && jl_is_type(f) && strcmp(jl_symbol_name(var), "=>") != 0)) {
- jl_module_t *from = jl_binding_dbgmodule(b, m, var);
- jl_errorf("invalid method definition in %s: function %s.%s must be explicitly imported to be extended",
- jl_symbol_name(m->name), jl_symbol_name(from->name), jl_symbol_name(var));
- }
- return b;
- }
- return b;
-}
-
-static int eq_bindings(jl_binding_partition_t *owner, jl_binding_t *alias, size_t world)
-{
- jl_ptr_kind_union_t owner_pku = jl_atomic_load_relaxed(&owner->restriction);
- assert(decode_restriction_kind(owner_pku) == BINDING_KIND_GLOBAL || decode_restriction_kind(owner_pku) == BINDING_KIND_DECLARED ||
- jl_bkind_is_some_constant(decode_restriction_kind(owner_pku)));
- jl_binding_partition_t *alias_bpart = jl_get_binding_partition(alias, world);
- if (owner == alias_bpart)
- return 1;
- jl_ptr_kind_union_t alias_pku = jl_walk_binding_inplace(&alias, &alias_bpart, world);
- if (jl_bkind_is_some_constant(decode_restriction_kind(owner_pku)) &&
- jl_bkind_is_some_constant(decode_restriction_kind(alias_pku)) &&
- decode_restriction_value(owner_pku) &&
- decode_restriction_value(alias_pku) == decode_restriction_value(owner_pku))
- return 1;
- return owner == alias_bpart;
-}
-
-// find a binding from a module's `usings` list
-static jl_binding_t *using_resolve_binding(jl_module_t *m JL_PROPAGATES_ROOT, jl_sym_t *var, jl_module_t **from, modstack_t *st, int warn, size_t world)
-{
- jl_binding_t *b = NULL;
- jl_binding_partition_t *bpart = NULL;
- jl_module_t *owner = NULL;
- JL_LOCK(&m->lock);
- int i = (int)module_usings_length(m) - 1;
- JL_UNLOCK(&m->lock);
- for (; i >= 0; --i) {
- JL_LOCK(&m->lock);
- jl_module_t *imp = module_usings_getmod(m, i);
- JL_UNLOCK(&m->lock);
- jl_binding_t *tempb = jl_get_module_binding(imp, var, 0);
- if (tempb != NULL && tempb->exportp) {
- tempb = jl_resolve_owner(NULL, imp, var, st, world); // find the owner for tempb
- if (tempb == NULL)
- // couldn't resolve; try next using (see issue #6105)
- continue;
- jl_binding_partition_t *tempbpart = jl_get_binding_partition(tempb, world);
- jl_ptr_kind_union_t tempb_pku = jl_atomic_load_relaxed(&tempbpart->restriction);
- assert(jl_bkind_is_some_guard(decode_restriction_kind(tempb_pku)) || decode_restriction_kind(tempb_pku) == BINDING_KIND_GLOBAL || decode_restriction_kind(tempb_pku) == BINDING_KIND_DECLARED || jl_bkind_is_some_constant(decode_restriction_kind(tempb_pku)));
- (void)tempb_pku;
- if (bpart != NULL && !tempb->deprecated && !b->deprecated && !eq_bindings(tempbpart, b, world)) {
- if (warn) {
- // set usingfailed=1 to avoid repeating this warning
- // the owner will still be NULL, so it can be later imported or defined
- tempb = jl_get_module_binding(m, var, 1);
- tempbpart = jl_get_binding_partition(tempb, world);
- jl_atomic_store_release(&tempbpart->restriction, encode_restriction(NULL, BINDING_KIND_FAILED));
- }
- return NULL;
- }
- if (owner == NULL || !tempb->deprecated) {
- owner = imp;
- b = tempb;
- bpart = tempbpart;
- }
- }
- }
- *from = owner;
- return b;
-}
-
// for error message printing: look up the module that exported a binding to m as var
// this might not be the same as the owner of the binding, since the binding itself may itself have been imported from elsewhere
-static jl_module_t *jl_binding_dbgmodule(jl_binding_t *b, jl_module_t *m, jl_sym_t *var)
+static jl_module_t *jl_binding_dbgmodule(jl_binding_t *b)
{
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- if (decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)) != BINDING_KIND_GLOBAL) {
- // for implicitly imported globals, try to re-resolve it to find the module we got it from most directly
- jl_module_t *from = NULL;
- jl_binding_t *b2 = using_resolve_binding(m, var, &from, NULL, 0, jl_current_task->world_age);
- if (b2) {
- jl_binding_partition_t *b2part = jl_get_binding_partition(b2, jl_current_task->world_age);
- if (eq_bindings(b2part, b, jl_current_task->world_age))
- return from;
- // if we did not find it (or accidentally found a different one), ignore this
- }
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ if (jl_bkind_is_some_explicit_import(kind) || kind == PARTITION_KIND_IMPLICIT_GLOBAL) {
+ return ((jl_binding_t*)bpart->restriction)->globalref->mod;
}
- return m;
+ if (kind == PARTITION_KIND_IMPLICIT_CONST) {
+ struct implicit_search_resolution resolution = jl_resolve_implicit_import(b, NULL, jl_current_task->world_age, 1);
+ return resolution.debug_only_import_from;
+ }
+ return b->globalref->mod;
}
-static void jl_binding_dep_message(jl_module_t *m, jl_sym_t *name, jl_binding_t *b);
-
-// get binding for reading. might return NULL for unbound.
-static jl_binding_t *jl_resolve_owner(jl_binding_t *b/*optional*/, jl_module_t *m, jl_sym_t *var, modstack_t *st, size_t world)
+// Look at the given binding and decide whether to add a new method to an existing generic function
+// or ask for the creation of a new generic function (NULL return), checking various error conditions
+// along the way.
+JL_DLLEXPORT jl_value_t *jl_get_existing_strong_gf(jl_binding_t *b, size_t new_world)
{
- if (b == NULL)
- b = jl_get_module_binding(m, var, 1);
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, world);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
-retry:
- if (decode_restriction_kind(pku) == BINDING_KIND_FAILED)
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, new_world);
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ if (jl_bkind_is_some_constant(kind) && kind != PARTITION_KIND_IMPLICIT_CONST)
+ return bpart->restriction;
+ if (jl_bkind_is_some_guard(kind) || kind == PARTITION_KIND_DECLARED) {
+ check_safe_newbinding(b->globalref->mod, b->globalref->name);
return NULL;
- if (decode_restriction_kind(pku) == BINDING_KIND_DECLARED) {
- return b;
}
- if (decode_restriction_kind(pku) == BINDING_KIND_GUARD) {
- jl_binding_t *b2 = NULL;
- modstack_t top = { m, var, st };
- modstack_t *tmp = st;
- for (; tmp != NULL; tmp = tmp->prev) {
- if (tmp->m == m && tmp->var == var) {
- // import cycle without finding actual location
- return NULL;
- }
+ if (!jl_bkind_is_some_import(kind)) {
+ jl_errorf("cannot define function %s; it already has a value", jl_symbol_name(b->globalref->name));
+ }
+ jl_binding_t *ownerb = b;
+ jl_walk_binding_inplace(&ownerb, &bpart, new_world);
+ jl_value_t *f = NULL;
+ if (jl_bkind_is_some_constant(jl_binding_kind(bpart)))
+ f = bpart->restriction;
+ if (f == NULL) {
+ if (jl_bkind_is_some_implicit(kind)) {
+ check_safe_newbinding(b->globalref->mod, b->globalref->name);
+ return NULL;
}
- jl_module_t *from = NULL; // for error message printing
- b2 = using_resolve_binding(m, var, &from, &top, 1, world);
- if (b2 == NULL)
+ jl_module_t *from = jl_binding_dbgmodule(b);
+ assert(from); // Can only be NULL if implicit, which we excluded above
+ jl_errorf("invalid method definition in %s: exported function %s.%s does not exist",
+ jl_module_debug_name(b->globalref->mod), jl_module_debug_name(from), jl_symbol_name(b->globalref->name));
+ }
+ int istype = f && jl_is_type(f);
+ if (!istype) {
+ if (jl_bkind_is_some_implicit(kind)) {
+ check_safe_newbinding(b->globalref->mod, b->globalref->name);
return NULL;
- assert(from);
- JL_GC_PROMISE_ROOTED(from); // gc-analysis does not understand output parameters
- JL_GC_PROMISE_ROOTED(b2);
- if (b2->deprecated) {
- if (jl_get_binding_value(b2) == jl_nothing) {
- // silently skip importing deprecated values assigned to nothing (to allow later mutation)
- return NULL;
- }
}
- // do a full import to prevent the result of this lookup from
- // changing, for example if this var is assigned to later.
- if (!jl_atomic_cmpswap(&bpart->restriction, &pku, encode_restriction((jl_value_t*)b2, BINDING_KIND_IMPLICIT)))
- goto retry;
- jl_gc_wb(bpart, b2);
- if (b2->deprecated) {
- b->deprecated = 1; // we will warn about this below, but we might want to warn at the use sites too
- if (m != jl_main_module && m != jl_base_module &&
- jl_options.depwarn != JL_OPTIONS_DEPWARN_OFF) {
- /* with #22763, external packages wanting to replace
- deprecated Base bindings should simply export the new
- binding */
- jl_printf(JL_STDERR,
- "WARNING: using deprecated binding %s.%s in %s.\n",
- jl_symbol_name(from->name), jl_symbol_name(var),
- jl_symbol_name(m->name));
- jl_binding_dep_message(from, var, b2);
- }
+ else if (kind != PARTITION_KIND_IMPORTED) {
+ // TODO: we might want to require explicitly importing types to add constructors
+ // or we might want to drop this error entirely
+ jl_module_t *from = jl_binding_dbgmodule(b);
+ assert(from); // Can only be NULL if implicit, which we excluded above
+ jl_errorf("invalid method definition in %s: function %s.%s must be explicitly imported to be extended",
+ jl_module_debug_name(b->globalref->mod), jl_module_debug_name(from), jl_symbol_name(b->globalref->name));
}
- return b2;
}
- jl_walk_binding_inplace(&b, &bpart, world);
- return b;
+ else if (kind != PARTITION_KIND_IMPORTED) {
+ int should_error = strcmp(jl_symbol_name(b->globalref->name), "=>") == 0;
+ jl_module_t *from = jl_binding_dbgmodule(b);
+ if (should_error) {
+ jl_errorf("invalid method definition in %s: function %s.%s must be explicitly imported to be extended",
+ jl_module_debug_name(b->globalref->mod), from ? jl_module_debug_name(from) : "", jl_symbol_name(b->globalref->name));
+ }
+ else if (!(jl_atomic_fetch_or_relaxed(&b->flags, BINDING_FLAG_DID_PRINT_IMPLICIT_IMPORT_ADMONITION) &
+ BINDING_FLAG_DID_PRINT_IMPLICIT_IMPORT_ADMONITION)) {
+ jl_printf(JL_STDERR, "WARNING: Constructor for type \"%s\" was extended in `%s` without explicit qualification or import.\n"
+ " NOTE: Assumed \"%s\" refers to `%s.%s`. This behavior is deprecated and may differ in future versions.\n"
+ " NOTE: This behavior may have differed in Julia versions prior to 1.12.\n"
+ " Hint: If you intended to create a new generic function of the same name, use `function %s end`.\n"
+ " Hint: To silence the warning, qualify `%s` as `%s.%s` in the method signature or explicitly `import %s: %s`.\n",
+ jl_symbol_name(b->globalref->name), jl_module_debug_name(b->globalref->mod),
+ jl_symbol_name(b->globalref->name), jl_module_debug_name(from), jl_symbol_name(b->globalref->name),
+ jl_symbol_name(b->globalref->name), jl_symbol_name(b->globalref->name), jl_module_debug_name(from), jl_symbol_name(b->globalref->name),
+ jl_module_debug_name(from), jl_symbol_name(b->globalref->name));
+ }
+ }
+ return f;
}
-// get the current likely owner of binding when accessing m.var, without resolving the binding (it may change later)
-JL_DLLEXPORT jl_binding_t *jl_binding_owner(jl_module_t *m, jl_sym_t *var)
-{
- jl_binding_t *b = jl_get_module_binding(m, var, 1);
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_module_t *from = m;
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- if (decode_restriction_kind(pku) == BINDING_KIND_GUARD) {
- b = using_resolve_binding(m, var, &from, NULL, 0, jl_current_task->world_age);
- bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- }
- pku = jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
- if (decode_restriction_kind(pku) != BINDING_KIND_GLOBAL && !jl_bkind_is_some_constant(decode_restriction_kind(pku)))
- return NULL;
- return b;
-}
+static void jl_binding_dep_message(jl_binding_t *b);
// get type of binding m.var, without resolving the binding
JL_DLLEXPORT jl_value_t *jl_get_binding_type(jl_module_t *m, jl_sym_t *var)
@@ -660,33 +1084,23 @@ JL_DLLEXPORT jl_value_t *jl_get_binding_type(jl_module_t *m, jl_sym_t *var)
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
if (b == NULL)
return jl_nothing;
- jl_ptr_kind_union_t pku = jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
- if (jl_bkind_is_some_guard(decode_restriction_kind(pku)))
+ jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ if (jl_bkind_is_some_guard(kind) || kind == PARTITION_KIND_DECLARED)
return jl_nothing;
- if (jl_bkind_is_some_constant(decode_restriction_kind(pku))) {
+ if (jl_bkind_is_some_constant(kind)) {
// TODO: We would like to return the type of the constant, but
// currently code relies on this returning any to bypass conversion
// before an attempted assignment to a constant.
- // return jl_typeof(jl_atomic_load_relaxed(&bpart->restriction));
+ // return bpart->restriction;
return (jl_value_t*)jl_any_type;
}
- return decode_restriction_value(pku);
+ return bpart->restriction;
}
JL_DLLEXPORT jl_binding_t *jl_get_binding(jl_module_t *m, jl_sym_t *var)
{
- return jl_resolve_owner(NULL, m, var, NULL, jl_current_task->world_age);
-}
-
-JL_DLLEXPORT jl_binding_t *jl_get_binding_or_error(jl_module_t *m, jl_sym_t *var)
-{
- jl_binding_t *b = jl_get_binding(m, var);
- if (b == NULL)
- jl_undefined_var_error(var, (jl_value_t*)m);
- // XXX: this only considers if the original is deprecated, not the binding in m
- if (b->deprecated)
- jl_binding_deprecation_warning(m, var, b);
- return b;
+ return jl_get_module_binding(m, var, 1);
}
JL_DLLEXPORT jl_value_t *jl_module_globalref(jl_module_t *m, jl_sym_t *var)
@@ -702,7 +1116,7 @@ JL_DLLEXPORT int jl_is_imported(jl_module_t *m, jl_sym_t *var)
{
jl_binding_t *b = jl_get_module_binding(m, var, 0);
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- return b && decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)) == BINDING_KIND_IMPORTED;
+ return b && jl_binding_kind(bpart) == PARTITION_KIND_IMPORTED;
}
extern const char *jl_filename;
@@ -710,8 +1124,10 @@ extern int jl_lineno;
static char const dep_message_prefix[] = "_dep_message_";
-static void jl_binding_dep_message(jl_module_t *m, jl_sym_t *name, jl_binding_t *b)
+static void jl_binding_dep_message(jl_binding_t *b)
{
+ jl_module_t *m = b->globalref->mod;
+ jl_sym_t *name = b->globalref->name;
size_t prefix_len = strlen(dep_message_prefix);
size_t name_len = strlen(jl_symbol_name(name));
char *dep_binding_name = (char*)alloca(prefix_len+name_len+1);
@@ -758,174 +1174,264 @@ static void jl_binding_dep_message(jl_module_t *m, jl_sym_t *name, jl_binding_t
JL_GC_POP();
}
+JL_DLLEXPORT void check_safe_import_from(jl_module_t *m)
+{
+ if (jl_options.incremental && jl_generating_output() && m == jl_main_module) {
+ jl_errorf("Any `import` or `using` from `Main` is prohibited during incremental compilation.");
+ }
+}
+
+static int eq_bindings(jl_binding_partition_t *owner, jl_binding_t *alias, size_t world)
+{
+ jl_binding_t *ownerb = NULL;
+ jl_binding_partition_t *alias_bpart = jl_get_binding_partition(alias, world);
+ if (owner == alias_bpart)
+ return 1;
+ jl_walk_binding_inplace(&ownerb, &owner, world);
+ jl_walk_binding_inplace(&alias, &alias_bpart, world);
+ if (jl_bkind_is_some_constant(jl_binding_kind(owner)) &&
+ jl_bkind_is_some_constant(jl_binding_kind(alias_bpart)) &&
+ owner->restriction &&
+ alias_bpart->restriction == owner->restriction)
+ return 1;
+ return owner == alias_bpart;
+}
+
// NOTE: we use explici since explicit is a C++ keyword
-static void module_import_(jl_module_t *to, jl_module_t *from, jl_sym_t *asname, jl_sym_t *s, int explici)
+static void module_import_(jl_task_t *ct, jl_module_t *to, jl_module_t *from, jl_sym_t *asname, jl_sym_t *s, int explici)
{
+ check_safe_import_from(from);
jl_binding_t *b = jl_get_binding(from, s);
- if (b == NULL) {
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
+ if (bpart->kind & PARTITION_FLAG_DEPRECATED) {
+ if (jl_get_binding_value(b) == jl_nothing) {
+ // silently skip importing deprecated values assigned to nothing (to allow later mutation)
+ return;
+ }
+ else if (to != jl_main_module && to != jl_base_module &&
+ jl_options.depwarn != JL_OPTIONS_DEPWARN_OFF) {
+ /* with #22763, external packages wanting to replace
+ deprecated Base bindings should simply export the new
+ binding */
+ jl_printf(JL_STDERR,
+ "WARNING: importing deprecated binding %s.%s into %s%s%s.\n",
+ jl_symbol_name(from->name), jl_symbol_name(s),
+ jl_symbol_name(to->name),
+ asname == s ? "" : " as ",
+ asname == s ? "" : jl_symbol_name(asname));
+ jl_binding_dep_message(b);
+ }
+ }
+
+ jl_binding_t *ownerb = b;
+ jl_binding_partition_t *ownerbpart = bpart;
+ jl_walk_binding_inplace(&ownerb, &ownerbpart, ct->world_age);
+
+ if (jl_bkind_is_some_guard(jl_binding_kind(ownerbpart))) {
jl_printf(JL_STDERR,
- "WARNING: could not import %s.%s into %s\n",
+ "WARNING: Imported binding %s.%s was undeclared at import time during import to %s.\n",
jl_symbol_name(from->name), jl_symbol_name(s),
jl_symbol_name(to->name));
}
+
+ jl_binding_t *bto = jl_get_module_binding(to, asname, 1);
+ if (bto == b) {
+ // importing a binding on top of itself. harmless.
+ return;
+ }
+ JL_LOCK(&world_counter_lock);
+ size_t new_world = jl_atomic_load_acquire(&jl_world_counter)+1;
+ jl_binding_partition_t *btopart = jl_get_binding_partition(bto, new_world);
+ enum jl_partition_kind btokind = jl_binding_kind(btopart);
+ if (jl_bkind_is_some_implicit(btokind)) {
+ jl_binding_partition_t *new_bpart = jl_replace_binding_locked(bto, btopart, (jl_value_t*)b, (explici != 0) ? PARTITION_KIND_IMPORTED : PARTITION_KIND_EXPLICIT, new_world);
+ if (jl_atomic_load_relaxed(&new_bpart->max_world) == ~(size_t)0)
+ jl_add_binding_backedge(b, (jl_value_t*)bto);
+ jl_atomic_store_release(&jl_world_counter, new_world);
+ }
else {
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- assert(decode_restriction_kind(pku) == BINDING_KIND_GLOBAL || decode_restriction_kind(pku) == BINDING_KIND_DECLARED || jl_bkind_is_some_constant(decode_restriction_kind(pku)));
- (void)pku;
- if (b->deprecated) {
- if (jl_get_binding_value(b) == jl_nothing) {
- // silently skip importing deprecated values assigned to nothing (to allow later mutation)
- return;
- }
- else if (to != jl_main_module && to != jl_base_module &&
- jl_options.depwarn != JL_OPTIONS_DEPWARN_OFF) {
- /* with #22763, external packages wanting to replace
- deprecated Base bindings should simply export the new
- binding */
- jl_printf(JL_STDERR,
- "WARNING: importing deprecated binding %s.%s into %s%s%s.\n",
- jl_symbol_name(from->name), jl_symbol_name(s),
- jl_symbol_name(to->name),
- asname == s ? "" : " as ",
- asname == s ? "" : jl_symbol_name(asname));
- jl_binding_dep_message(from, s, b);
+ if (eq_bindings(bpart, bto, new_world)) {
+ // already imported - potentially upgrade _EXPLICIT to _IMPORTED
+ if (btokind == PARTITION_KIND_EXPLICIT && explici != 0) {
+ jl_replace_binding_locked(bto, btopart, (jl_value_t*)b, PARTITION_KIND_IMPORTED, new_world);
+ jl_atomic_store_release(&jl_world_counter, new_world);
}
}
-
- jl_binding_t *bto = jl_get_module_binding(to, asname, 1);
- if (bto == b) {
- // importing a binding on top of itself. harmless.
- return;
- }
- jl_binding_partition_t *btopart = jl_get_binding_partition(bto, jl_current_task->world_age);
- jl_ptr_kind_union_t bto_pku = jl_atomic_load_relaxed(&btopart->restriction);
-retry:
- if (decode_restriction_kind(bto_pku) == BINDING_KIND_GUARD ||
- decode_restriction_kind(bto_pku) == BINDING_KIND_IMPLICIT ||
- decode_restriction_kind(bto_pku) == BINDING_KIND_FAILED) {
-
- jl_ptr_kind_union_t new_pku = encode_restriction((jl_value_t*)b, (explici != 0) ? BINDING_KIND_IMPORTED : BINDING_KIND_EXPLICIT);
- if (!jl_atomic_cmpswap(&btopart->restriction, &bto_pku, new_pku))
- goto retry;
- jl_gc_wb(btopart, b);
- bto->deprecated |= b->deprecated; // we already warned about this above, but we might want to warn at the use sites too
+ else if (jl_bkind_is_some_import(btokind)) {
+ // already imported from somewhere else
+ jl_printf(JL_STDERR,
+ "WARNING: ignoring conflicting import of %s.%s into %s\n",
+ jl_symbol_name(from->name), jl_symbol_name(s),
+ jl_symbol_name(to->name));
}
else {
- if (eq_bindings(bpart, bto, jl_current_task->world_age)) {
- // already imported - potentially upgrade to _IMPORTED or _EXPLICIT
- if (jl_bkind_is_some_import(decode_restriction_kind(bto_pku))) {
- jl_ptr_kind_union_t new_pku = encode_restriction(decode_restriction_value(bto_pku), (explici != 0) ? BINDING_KIND_IMPORTED : BINDING_KIND_EXPLICIT);
- if (!jl_atomic_cmpswap(&btopart->restriction, &bto_pku, new_pku))
- goto retry;
- // No wb, because the value is unchanged
- }
- }
- else if (jl_bkind_is_some_import(decode_restriction_kind(bto_pku))) {
- // already imported from somewhere else
- jl_printf(JL_STDERR,
- "WARNING: ignoring conflicting import of %s.%s into %s\n",
- jl_symbol_name(from->name), jl_symbol_name(s),
- jl_symbol_name(to->name));
- }
- else {
- // conflict with name owned by destination module
- jl_printf(JL_STDERR,
- "WARNING: import of %s.%s into %s conflicts with an existing identifier; ignored.\n",
- jl_symbol_name(from->name), jl_symbol_name(s),
- jl_symbol_name(to->name));
- }
+ // conflict with name owned by destination module
+ jl_printf(JL_STDERR,
+ "WARNING: import of %s.%s into %s conflicts with an existing identifier; ignored.\n",
+ jl_symbol_name(from->name), jl_symbol_name(s),
+ jl_symbol_name(to->name));
}
}
+ JL_UNLOCK(&world_counter_lock);
}
-JL_DLLEXPORT void jl_module_import(jl_module_t *to, jl_module_t *from, jl_sym_t *s)
+JL_DLLEXPORT void jl_module_import(jl_task_t *ct, jl_module_t *to, jl_module_t *from, jl_sym_t *s)
{
- module_import_(to, from, s, s, 1);
+ module_import_(ct, to, from, s, s, 1);
}
-JL_DLLEXPORT void jl_module_import_as(jl_module_t *to, jl_module_t *from, jl_sym_t *s, jl_sym_t *asname)
+JL_DLLEXPORT void jl_module_import_as(jl_task_t *ct, jl_module_t *to, jl_module_t *from, jl_sym_t *s, jl_sym_t *asname)
{
- module_import_(to, from, asname, s, 1);
+ module_import_(ct, to, from, asname, s, 1);
}
-JL_DLLEXPORT void jl_module_use(jl_module_t *to, jl_module_t *from, jl_sym_t *s)
+JL_DLLEXPORT void jl_module_use(jl_task_t *ct, jl_module_t *to, jl_module_t *from, jl_sym_t *s)
{
- module_import_(to, from, s, s, 0);
+ module_import_(ct, to, from, s, s, 0);
}
-JL_DLLEXPORT void jl_module_use_as(jl_module_t *to, jl_module_t *from, jl_sym_t *s, jl_sym_t *asname)
+JL_DLLEXPORT void jl_module_use_as(jl_task_t *ct, jl_module_t *to, jl_module_t *from, jl_sym_t *s, jl_sym_t *asname)
{
- module_import_(to, from, asname, s, 0);
+ module_import_(ct, to, from, asname, s, 0);
+}
+
+void jl_add_usings_backedge(jl_module_t *from, jl_module_t *to)
+{
+ JL_LOCK(&from->lock);
+ if (from->usings_backedges == jl_nothing) {
+ from->usings_backedges = (jl_value_t*)jl_alloc_vec_any(0);
+ jl_gc_wb(from, from->usings_backedges);
+ }
+ jl_array_ptr_1d_push((jl_array_t*)from->usings_backedges, (jl_value_t*)to);
+ JL_UNLOCK(&from->lock);
+}
+
+void jl_module_initial_using(jl_module_t *to, jl_module_t *from)
+{
+ struct _jl_module_using new_item = {
+ .mod = from,
+ .min_world = 0,
+ .max_world = ~(size_t)0
+ };
+ arraylist_grow(&to->usings, sizeof(struct _jl_module_using)/sizeof(void*));
+ memcpy(&to->usings.items[to->usings.len-3], &new_item, sizeof(struct _jl_module_using));
+ jl_gc_wb(to, from);
+ jl_add_usings_backedge(from, to);
}
JL_DLLEXPORT void jl_module_using(jl_module_t *to, jl_module_t *from)
{
if (to == from)
return;
+ check_safe_import_from(from);
+ JL_LOCK(&world_counter_lock);
JL_LOCK(&to->lock);
for (size_t i = 0; i < module_usings_length(to); i++) {
if (from == module_usings_getmod(to, i)) {
JL_UNLOCK(&to->lock);
+ JL_UNLOCK(&world_counter_lock);
return;
}
}
+
+ size_t new_world = jl_atomic_load_acquire(&jl_world_counter)+1;
struct _jl_module_using new_item = {
.mod = from,
- .min_world = 0,
- .max_world = (size_t)-1
+ .min_world = new_world,
+ .max_world = ~(size_t)0
};
arraylist_grow(&to->usings, sizeof(struct _jl_module_using)/sizeof(void*));
memcpy(&to->usings.items[to->usings.len-3], &new_item, sizeof(struct _jl_module_using));
jl_gc_wb(to, from);
+
JL_UNLOCK(&to->lock);
- // print a warning if something visible via this "using" conflicts with
- // an existing identifier. note that an identifier added later may still
- // silently override a "using" name. see issue #2054.
+ // Go through all exported bindings. If we have a binding for this in the
+ // importing module and it is some import or guard, we need to recompute
+ // it.
jl_svec_t *table = jl_atomic_load_relaxed(&from->bindings);
for (size_t i = 0; i < jl_svec_len(table); i++) {
jl_binding_t *b = (jl_binding_t*)jl_svecref(table, i);
if ((void*)b == jl_nothing)
break;
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- if (b->exportp && (decode_restriction_kind(pku) == BINDING_KIND_GLOBAL || decode_restriction_kind(pku) == BINDING_KIND_IMPORTED)) {
+ jl_binding_partition_t *frombpart = jl_get_binding_partition(b, new_world);
+ if (frombpart->kind & PARTITION_FLAG_EXPORTED) {
jl_sym_t *var = b->globalref->name;
jl_binding_t *tob = jl_get_module_binding(to, var, 0);
if (tob) {
- jl_binding_partition_t *tobpart = jl_get_binding_partition(tob, jl_current_task->world_age);
- jl_ptr_kind_union_t tobpku = jl_walk_binding_inplace(&tob, &tobpart, jl_current_task->world_age);
- if (tob && decode_restriction_kind(tobpku) != BINDING_KIND_GUARD &&
- // don't warn for conflicts with the module name itself.
- // see issue #4715
- var != to->name &&
- !eq_bindings(tobpart, b, jl_current_task->world_age)) {
- jl_printf(JL_STDERR,
- "WARNING: using %s.%s in module %s conflicts with an existing identifier.\n",
- jl_symbol_name(from->name), jl_symbol_name(var),
- jl_symbol_name(to->name));
+ jl_binding_partition_t *tobpart = jl_atomic_load_relaxed(&tob->partitions);
+ if (tobpart) {
+ enum jl_partition_kind kind = jl_binding_kind(tobpart);
+ if (jl_bkind_is_some_implicit(kind)) {
+ jl_replace_binding_locked(tob, tobpart, NULL, PARTITION_FAKE_KIND_IMPLICIT_RECOMPUTE, new_world);
+ }
}
}
}
table = jl_atomic_load_relaxed(&from->bindings);
}
+
+ jl_add_usings_backedge(from, to);
+
+ jl_atomic_store_release(&jl_world_counter, new_world);
+ JL_UNLOCK(&world_counter_lock);
+}
+
+JL_DLLEXPORT jl_value_t *jl_get_module_usings_backedges(jl_module_t *m)
+{
+ // We assume the caller holds the world_counter_lock, which is the only place we set this
+ // TODO: We may want to make this more precise with the module lock
+ return m->usings_backedges;
+}
+
+JL_DLLEXPORT size_t jl_module_scanned_methods_length(jl_module_t *m)
+{
+ JL_LOCK(&m->lock);
+ size_t len = 0;
+ if (m->scanned_methods != jl_nothing)
+ len = jl_array_len(m->scanned_methods);
+ JL_UNLOCK(&m->lock);
+ return len;
+}
+
+JL_DLLEXPORT jl_value_t *jl_module_scanned_methods_getindex(jl_module_t *m, size_t i)
+{
+ JL_LOCK(&m->lock);
+ assert(m->scanned_methods != jl_nothing);
+ jl_value_t *ret = jl_array_ptr_ref(m->scanned_methods, i-1);
+ JL_UNLOCK(&m->lock);
+ return ret;
+}
+
+JL_DLLEXPORT jl_value_t *jl_get_module_binding_or_nothing(jl_module_t *m, jl_sym_t *s)
+{
+ jl_binding_t *b = jl_get_module_binding(m, s, 0);
+ if (!b)
+ return jl_nothing;
+ return (jl_value_t*)b;
}
-JL_DLLEXPORT void jl_module_public(jl_module_t *from, jl_sym_t *s, int exported)
+int jl_module_public_(jl_module_t *from, jl_sym_t *s, int exported, size_t new_world)
{
+ // caller must hold world_counter_lock
jl_binding_t *b = jl_get_module_binding(from, s, 1);
- if (b->publicp) {
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, new_world);
+ int was_exported = (bpart->kind & PARTITION_FLAG_EXPORTED) != 0;
+ if (jl_atomic_load_relaxed(&b->flags) & BINDING_FLAG_PUBLICP) {
// check for conflicting declarations
- if (b->exportp && !exported)
+ if (was_exported && !exported)
jl_errorf("cannot declare %s.%s public; it is already declared exported",
jl_symbol_name(from->name), jl_symbol_name(s));
- if (!b->exportp && exported)
+ if (!was_exported && exported)
jl_errorf("cannot declare %s.%s exported; it is already declared public",
jl_symbol_name(from->name), jl_symbol_name(s));
}
- b->publicp = 1;
- b->exportp |= exported;
+ jl_atomic_fetch_or_relaxed(&b->flags, BINDING_FLAG_PUBLICP);
+ if (was_exported != exported) {
+ jl_replace_binding_locked2(b, bpart, bpart->restriction, bpart->kind | PARTITION_FLAG_EXPORTED, new_world);
+ return 1;
+ }
+ return 0;
}
JL_DLLEXPORT int jl_boundp(jl_module_t *m, jl_sym_t *var, int allow_import) // unlike most queries here, this is currently seq_cst
@@ -936,52 +1442,32 @@ JL_DLLEXPORT int jl_boundp(jl_module_t *m, jl_sym_t *var, int allow_import) // u
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
if (!bpart)
return 0;
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
if (!allow_import) {
- if (!bpart || jl_bkind_is_some_import(decode_restriction_kind(pku)))
+ if (!bpart || jl_bkind_is_some_import(jl_binding_kind(bpart)))
return 0;
} else {
- if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) {
- jl_resolve_owner(b, b->globalref->mod, b->globalref->name, NULL, jl_current_task->world_age);
- }
- pku = jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
+ jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
}
- if (jl_bkind_is_some_guard(decode_restriction_kind(pku)))
+ if (jl_bkind_is_some_guard(jl_binding_kind(bpart)))
return 0;
- if (jl_bkind_is_defined_constant(decode_restriction_kind(pku))) {
+ if (jl_bkind_is_defined_constant(jl_binding_kind(bpart))) {
// N.B.: No backdated check for isdefined
return 1;
}
return jl_atomic_load(&b->value) != NULL;
}
-JL_DLLEXPORT int jl_defines_or_exports_p(jl_module_t *m, jl_sym_t *var)
-{
- jl_binding_t *b = jl_get_module_binding(m, var, 0);
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- return b && (b->exportp || decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)) == BINDING_KIND_GLOBAL);
-}
-
JL_DLLEXPORT int jl_module_exports_p(jl_module_t *m, jl_sym_t *var)
{
jl_binding_t *b = jl_get_module_binding(m, var, 0);
- return b && b->exportp;
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
+ return b && (bpart->kind & PARTITION_FLAG_EXPORTED);
}
JL_DLLEXPORT int jl_module_public_p(jl_module_t *m, jl_sym_t *var)
{
jl_binding_t *b = jl_get_module_binding(m, var, 0);
- return b && b->publicp;
-}
-
-JL_DLLEXPORT int jl_binding_resolved_p(jl_module_t *m, jl_sym_t *var)
-{
- jl_binding_t *b = jl_get_module_binding(m, var, 0);
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- if (!bpart)
- return 0;
- enum jl_partition_kind kind = decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction));
- return kind == BINDING_KIND_DECLARED || !jl_bkind_is_some_guard(kind);
+ return b && (jl_atomic_load_relaxed(&b->flags) & BINDING_FLAG_PUBLICP);
}
uint_t bindingkey_hash(size_t idx, jl_value_t *data)
@@ -1052,20 +1538,15 @@ JL_DLLEXPORT jl_binding_t *jl_get_module_binding(jl_module_t *m, jl_sym_t *var,
JL_DLLEXPORT jl_value_t *jl_get_globalref_value(jl_globalref_t *gr)
{
jl_binding_t *b = gr->binding;
- b = jl_resolve_owner(b, gr->mod, gr->name, NULL, jl_current_task->world_age);
- // ignores b->deprecated
- return b == NULL ? NULL : jl_get_binding_value(b);
+ if (!b)
+ b = jl_get_module_binding(gr->mod, gr->name, 1);
+ return jl_get_binding_value_depwarn(b);
}
JL_DLLEXPORT jl_value_t *jl_get_global(jl_module_t *m, jl_sym_t *var)
{
- jl_binding_t *b = jl_get_binding(m, var);
- if (b == NULL)
- return NULL;
- // XXX: this only considers if the original is deprecated, not the binding in m
- if (b->deprecated)
- jl_binding_deprecation_warning(m, var, b);
- return jl_get_binding_value(b);
+ jl_binding_t *b = jl_get_module_binding(m, var, 1);
+ return jl_get_binding_value_depwarn(b);
}
JL_DLLEXPORT void jl_set_global(jl_module_t *m JL_ROOTING_ARGUMENT, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT)
@@ -1074,28 +1555,49 @@ JL_DLLEXPORT void jl_set_global(jl_module_t *m JL_ROOTING_ARGUMENT, jl_sym_t *va
jl_checked_assignment(bp, m, var, val);
}
+JL_DLLEXPORT void jl_set_initial_const(jl_module_t *m JL_ROOTING_ARGUMENT, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT, int exported)
+{
+ // this function is only valid during initialization, so there is no risk of data races here
+ int kind = PARTITION_KIND_CONST | (exported ? PARTITION_FLAG_EXPORTED : 0);
+ // jl_declare_constant_val3(NULL, m, var, (jl_value_t*)jl_any_type, kind, 0);
+ jl_binding_t *bp = jl_get_module_binding(m, var, 1);
+ jl_binding_partition_t *bpart = jl_get_binding_partition(bp, 0);
+ assert(jl_atomic_load_relaxed(&bpart->min_world) == 0);
+ jl_atomic_store_relaxed(&bpart->max_world, ~(size_t)0); // jl_check_new_binding_implicit likely incorrectly truncated it
+ if (exported)
+ jl_atomic_fetch_or_relaxed(&bp->flags, BINDING_FLAG_PUBLICP);
+ bpart->kind = kind | (bpart->kind & PARTITION_MASK_FLAG);
+ bpart->restriction = val;
+ jl_gc_wb(bpart, val);
+}
+
JL_DLLEXPORT void jl_set_const(jl_module_t *m JL_ROOTING_ARGUMENT, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT)
{
- // this function is mostly only used during initialization, so the data races here are not too important to us
+ // this function is dangerous and unsound. do not use.
jl_binding_t *bp = jl_get_module_binding(m, var, 1);
jl_binding_partition_t *bpart = jl_get_binding_partition(bp, jl_current_task->world_age);
- assert(jl_bkind_is_some_guard(decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction))));
- jl_atomic_store_release(&bpart->restriction, encode_restriction(val, BINDING_KIND_CONST));
+ jl_atomic_store_relaxed(&bpart->min_world, 0);
+ jl_atomic_store_release(&bpart->max_world, ~(size_t)0);
+ bpart->kind = PARTITION_KIND_CONST | (bpart->kind & PARTITION_MASK_FLAG);
+ bpart->restriction = val;
jl_gc_wb(bpart, val);
}
-void jl_invalidate_binding_refs(jl_globalref_t *ref, jl_binding_partition_t *invalidated_bpart, size_t new_world)
+void jl_invalidate_binding_refs(jl_globalref_t *ref, jl_binding_partition_t *invalidated_bpart, jl_binding_partition_t *new_bpart, size_t new_world)
{
static jl_value_t *invalidate_code_for_globalref = NULL;
if (invalidate_code_for_globalref == NULL && jl_base_module != NULL)
invalidate_code_for_globalref = jl_get_global(jl_base_module, jl_symbol("invalidate_code_for_globalref!"));
if (!invalidate_code_for_globalref)
jl_error("Binding invalidation is not permitted during bootstrap.");
- if (jl_generating_output())
- jl_error("Binding invalidation is not permitted during image generation.");
- jl_value_t *boxed_world = jl_box_ulong(new_world);
- JL_GC_PUSH1(&boxed_world);
- jl_call3((jl_function_t*)invalidate_code_for_globalref, (jl_value_t*)ref, (jl_value_t*)invalidated_bpart, boxed_world);
+ jl_value_t **fargs;
+ JL_GC_PUSHARGS(fargs, 5);
+ fargs[0] = (jl_function_t*)invalidate_code_for_globalref;
+ fargs[1] = (jl_value_t*)ref;
+ fargs[2] = (jl_value_t*)invalidated_bpart;
+ fargs[3] = (jl_value_t*)new_bpart;
+ fargs[4] = jl_box_ulong(new_world);
+ jl_apply(fargs, 5);
JL_GC_POP();
}
@@ -1115,111 +1617,216 @@ JL_DLLEXPORT void jl_add_binding_backedge(jl_binding_t *b, jl_value_t *edge)
// Called for all GlobalRefs found in lowered code. Adds backedges for cross-module
// GlobalRefs.
-JL_DLLEXPORT void jl_maybe_add_binding_backedge(jl_globalref_t *gr, jl_module_t *defining_module, jl_value_t *edge)
+JL_DLLEXPORT int jl_maybe_add_binding_backedge(jl_binding_t *b, jl_value_t *edge, jl_method_t *for_method)
{
if (!edge)
- return;
+ return 0;
+ jl_module_t *defining_module = for_method->module;
// N.B.: The logic for evaluating whether a backedge is required must
// match the invalidation logic.
- if (gr->mod == defining_module) {
+ if (b->globalref->mod == defining_module) {
// No backedge required - invalidation will forward scan
- return;
+ jl_atomic_fetch_or(&b->flags, BINDING_FLAG_ANY_IMPLICIT_EDGES);
+ if (!(jl_atomic_fetch_or(&for_method->did_scan_source, 0x2) & 0x2))
+ jl_add_scanned_method(for_method->module, for_method);
+ return 1;
}
- jl_binding_t *b = gr->binding;
- if (!b)
- b = jl_get_module_binding(gr->mod, gr->name, 1);
- jl_add_binding_backedge(b, edge);
+ jl_add_binding_backedge(b, (jl_value_t*)edge);
+ return 0;
}
-JL_DLLEXPORT void jl_disable_binding(jl_globalref_t *gr)
+JL_DLLEXPORT jl_binding_partition_t *jl_replace_binding_locked(jl_binding_t *b,
+ jl_binding_partition_t *old_bpart, jl_value_t *restriction_val, enum jl_partition_kind kind, size_t new_world)
{
- jl_binding_t *b = gr->binding;
- b = jl_resolve_owner(b, gr->mod, gr->name, NULL, jl_current_task->world_age);
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
+ // Copy flags from old bpart
+ return jl_replace_binding_locked2(b, old_bpart, restriction_val, (size_t)kind | (size_t)(old_bpart->kind & PARTITION_MASK_FLAG),
+ new_world);
+}
- if (decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)) == BINDING_KIND_GUARD) {
- // Already guard
- return;
+extern JL_DLLEXPORT _Atomic(size_t) jl_first_image_replacement_world;
+JL_DLLEXPORT jl_binding_partition_t *jl_replace_binding_locked2(jl_binding_t *b,
+ jl_binding_partition_t *old_bpart, jl_value_t *restriction_val, size_t kind, size_t new_world)
+{
+ check_safe_newbinding(b->globalref->mod, b->globalref->name);
+
+ // Check if this is replacing a binding in the system or a package image.
+ // Until the first such replacement, we can fast-path validation.
+ // For these purposes, we consider the `Main` module to be a non-sysimg module.
+ // This is legal, because we special case the `Main` in check_safe_import_from.
+ if (jl_object_in_image((jl_value_t*)b) && b->globalref->mod != jl_main_module && jl_atomic_load_relaxed(&jl_first_image_replacement_world) == ~(size_t)0)
+ jl_atomic_store_relaxed(&jl_first_image_replacement_world, new_world);
+
+ assert(jl_atomic_load_relaxed(&b->partitions) == old_bpart);
+ jl_binding_partition_t *new_bpart = new_binding_partition();
+ JL_GC_PUSH1(&new_bpart);
+ jl_atomic_store_relaxed(&new_bpart->min_world, new_world);
+ if ((kind & PARTITION_MASK_KIND) == PARTITION_FAKE_KIND_IMPLICIT_RECOMPUTE) {
+ assert(!restriction_val);
+ struct implicit_search_resolution resolution = jl_resolve_implicit_import(b, NULL, new_world, 0);
+ new_bpart->kind = resolution.ultimate_kind | (kind & PARTITION_MASK_FLAG);
+ new_bpart->restriction = resolution.binding_or_const;
+ assert(resolution.min_world <= new_world && resolution.max_world == ~(size_t)0);
+ if (new_bpart->kind == old_bpart->kind && new_bpart->restriction == old_bpart->restriction) {
+ JL_GC_POP();
+ return old_bpart;
+ }
+ }
+ else {
+ new_bpart->kind = kind;
+ new_bpart->restriction = restriction_val;
+ jl_gc_wb_fresh(new_bpart, restriction_val);
+ }
+ jl_atomic_store_release(&old_bpart->max_world, new_world-1);
+ jl_atomic_store_relaxed(&new_bpart->next, old_bpart);
+ jl_gc_wb_fresh(new_bpart, old_bpart);
+
+ if (((old_bpart->kind & PARTITION_FLAG_EXPORTED) || (kind & PARTITION_FLAG_EXPORTED)) && jl_require_world != ~(size_t)0) {
+ jl_atomic_store_release(&b->globalref->mod->export_set_changed_since_require_world, 1);
+ }
+
+ jl_atomic_store_release(&b->partitions, new_bpart);
+ jl_gc_wb(b, new_bpart);
+ JL_GC_POP();
+
+ if (jl_typeinf_world != 1) {
+ jl_task_t *ct = jl_current_task;
+ size_t last_world = ct->world_age;
+ ct->world_age = jl_typeinf_world;
+ jl_invalidate_binding_refs(b->globalref, old_bpart, new_bpart, new_world-1);
+ ct->world_age = last_world;
}
+ return new_bpart;
+}
+
+JL_DLLEXPORT jl_binding_partition_t *jl_replace_binding(jl_binding_t *b,
+ jl_binding_partition_t *old_bpart, jl_value_t *restriction_val, enum jl_partition_kind kind) {
+
JL_LOCK(&world_counter_lock);
- jl_task_t *ct = jl_current_task;
- size_t last_world = ct->world_age;
- size_t new_max_world = jl_atomic_load_acquire(&jl_world_counter);
- jl_atomic_store_release(&bpart->max_world, new_max_world);
- ct->world_age = jl_typeinf_world;
- jl_invalidate_binding_refs(gr, bpart, new_max_world);
- ct->world_age = last_world;
- jl_atomic_store_release(&jl_world_counter, new_max_world + 1);
+
+ if (jl_atomic_load_relaxed(&b->partitions) != old_bpart) {
+ JL_UNLOCK(&world_counter_lock);
+ return NULL;
+ }
+
+ size_t new_world = jl_atomic_load_acquire(&jl_world_counter)+1;
+ jl_binding_partition_t *bpart = jl_replace_binding_locked(b, old_bpart, restriction_val, kind, new_world);
+ if (bpart && jl_atomic_load_relaxed(&bpart->min_world) == new_world)
+ jl_atomic_store_release(&jl_world_counter, new_world);
+
JL_UNLOCK(&world_counter_lock);
+ return bpart;
}
JL_DLLEXPORT int jl_globalref_is_const(jl_globalref_t *gr)
{
jl_binding_t *b = gr->binding;
- b = jl_resolve_owner(b, gr->mod, gr->name, NULL, jl_current_task->world_age);
+ if (!b)
+ b = jl_get_module_binding(gr->mod, gr->name, 1);
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- if (!bpart)
- return 0;
- return jl_bkind_is_some_constant(decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)));
+ jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
+ return jl_bkind_is_some_constant(jl_binding_kind(bpart));
}
-JL_DLLEXPORT void jl_force_binding_resolution(jl_globalref_t *gr, size_t world)
+JL_DLLEXPORT void jl_disable_binding(jl_globalref_t *gr)
{
jl_binding_t *b = gr->binding;
- jl_resolve_owner(b, gr->mod, gr->name, NULL, world);
+ if (!b)
+ b = jl_get_module_binding(gr->mod, gr->name, 1);
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
+
+ if (jl_binding_kind(bpart) == PARTITION_KIND_GUARD) {
+ // Already guard
+ return;
+ }
+
+ for (;;)
+ if (jl_replace_binding(b, bpart, NULL, PARTITION_KIND_GUARD))
+ break;
}
JL_DLLEXPORT int jl_is_const(jl_module_t *m, jl_sym_t *var)
{
jl_binding_t *b = jl_get_binding(m, var);
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- return b && jl_bkind_is_some_constant(decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)));
+ jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
+ return b && jl_bkind_is_some_constant(jl_binding_kind(bpart));
}
// set the deprecated flag for a binding:
// 0=not deprecated, 1=renamed, 2=moved to another package
+static const size_t DEPWARN_FLAGS = PARTITION_FLAG_DEPRECATED | PARTITION_FLAG_DEPWARN;
JL_DLLEXPORT void jl_deprecate_binding(jl_module_t *m, jl_sym_t *var, int flag)
{
- // XXX: this deprecates the original value, which might be imported from elsewhere
jl_binding_t *b = jl_get_binding(m, var);
- if (b) b->deprecated = flag;
+ size_t new_flags = flag == 1 ? PARTITION_FLAG_DEPRECATED | PARTITION_FLAG_DEPWARN :
+ flag == 2 ? PARTITION_FLAG_DEPRECATED :
+ 0;
+ JL_LOCK(&world_counter_lock);
+ size_t new_world = jl_atomic_load_acquire(&jl_world_counter)+1;
+ jl_binding_partition_t *old_bpart = jl_get_binding_partition(b, jl_current_task->world_age);
+ if ((old_bpart->kind & DEPWARN_FLAGS) == new_flags) {
+ JL_UNLOCK(&world_counter_lock);
+ return;
+ }
+ jl_replace_binding_locked2(b, old_bpart, old_bpart->restriction,
+ (old_bpart->kind & ~DEPWARN_FLAGS) | new_flags, new_world);
+ jl_atomic_store_release(&jl_world_counter, new_world);
+ JL_UNLOCK(&world_counter_lock);
}
-JL_DLLEXPORT int jl_is_binding_deprecated(jl_module_t *m, jl_sym_t *var)
+static int should_depwarn(jl_binding_t *b, uint8_t flag)
{
- if (jl_binding_resolved_p(m, var)) {
- // XXX: this only considers if the original is deprecated, not this precise binding
- jl_binding_t *b = jl_get_binding(m, var);
- return b && b->deprecated;
- }
+ // We consider bindings deprecated, if:
+ //
+ // 1. The binding itself is deprecated, or
+ // 2. We implicitly import any deprecated binding.
+ //
+ // However, we do not consider the binding deprecated if the import was an explicit
+ // (`using` or `import`). The logic here is that the thing that needs to be adjusted
+ // is not the use itself, but rather the `using` or `import` (which already prints
+ // an appropriate warning).
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
+ if (bpart->kind & flag)
+ return 1;
return 0;
}
-void jl_binding_deprecation_warning(jl_module_t *m, jl_sym_t *s, jl_binding_t *b)
+JL_DLLEXPORT void jl_binding_deprecation_check(jl_binding_t *b)
{
- // Only print a warning for deprecated == 1 (renamed).
- // For deprecated == 2 (moved to a package) the binding is to a function
- // that throws an error, so we don't want to print a warning too.
- if (b->deprecated == 1 && jl_options.depwarn) {
- if (jl_options.depwarn != JL_OPTIONS_DEPWARN_ERROR)
- jl_printf(JL_STDERR, "WARNING: ");
- jl_printf(JL_STDERR, "%s.%s is deprecated",
- jl_symbol_name(m->name), jl_symbol_name(s));
- jl_binding_dep_message(m, s, b);
+ if (jl_options.depwarn && should_depwarn(b, PARTITION_FLAG_DEPWARN))
+ jl_binding_deprecation_warning(b);
+}
- if (jl_options.depwarn != JL_OPTIONS_DEPWARN_ERROR) {
- if (jl_lineno != 0) {
- jl_printf(JL_STDERR, " likely near %s:%d\n", jl_filename, jl_lineno);
- }
- }
+JL_DLLEXPORT int jl_is_binding_deprecated(jl_module_t *m, jl_sym_t *var)
+{
+ jl_binding_t *b = jl_get_module_binding(m, var, 0);
+ if (!b)
+ return 0;
+ return should_depwarn(b, PARTITION_FLAG_DEPRECATED);
+}
- if (jl_options.depwarn == JL_OPTIONS_DEPWARN_ERROR) {
- jl_errorf("use of deprecated variable: %s.%s",
- jl_symbol_name(m->name),
- jl_symbol_name(s));
+void jl_binding_deprecation_warning(jl_binding_t *b)
+{
+ if (jl_options.depwarn != JL_OPTIONS_DEPWARN_ERROR)
+ jl_printf(JL_STDERR, "WARNING: ");
+ jl_printf(JL_STDERR, "Use of ");
+
+ jl_printf(JL_STDERR, "%s.%s is deprecated",
+ jl_symbol_name(b->globalref->mod->name), jl_symbol_name(b->globalref->name));
+ jl_binding_dep_message(b);
+
+ if (jl_options.depwarn != JL_OPTIONS_DEPWARN_ERROR) {
+ if (jl_lineno != 0) {
+ jl_printf(JL_STDERR, " likely near %s:%d\n", jl_filename, jl_lineno);
}
}
+
+ if (jl_options.depwarn == JL_OPTIONS_DEPWARN_ERROR) {
+ jl_errorf("use of deprecated variable: %s.%s",
+ jl_symbol_name(b->globalref->mod->name),
+ jl_symbol_name(b->globalref->name));
+ }
}
// For a generally writable binding (checked using jl_check_binding_currently_writable in this world age), check whether
@@ -1228,23 +1835,9 @@ jl_value_t *jl_check_binding_assign_value(jl_binding_t *b JL_PROPAGATES_ROOT, jl
{
JL_GC_PUSH1(&rhs); // callee-rooted
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- assert(!jl_bkind_is_some_guard(decode_restriction_kind(pku)) && !jl_bkind_is_some_import(decode_restriction_kind(pku)));
- if (jl_bkind_is_some_constant(decode_restriction_kind(pku))) {
- jl_value_t *old = decode_restriction_value(pku);
- JL_GC_PROMISE_ROOTED(old);
- if (jl_egal(rhs, old)) {
- JL_GC_POP();
- return NULL;
- }
- if (jl_typeof(rhs) == jl_typeof(old))
- jl_errorf("invalid redefinition of constant %s.%s. This redefinition may be permitted using the `const` keyword.",
- jl_symbol_name(mod->name), jl_symbol_name(var));
- else
- jl_errorf("invalid redefinition of constant %s.%s.",
- jl_symbol_name(mod->name), jl_symbol_name(var));
- }
- jl_value_t *old_ty = decode_restriction_value(pku);
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ assert(kind == PARTITION_KIND_DECLARED || kind == PARTITION_KIND_GLOBAL);
+ jl_value_t *old_ty = kind == PARTITION_KIND_DECLARED ? (jl_value_t*)jl_any_type : bpart->restriction;
JL_GC_PROMISE_ROOTED(old_ty);
if (old_ty != (jl_value_t*)jl_any_type && jl_typeof(rhs) != old_ty) {
if (!jl_isa(rhs, old_ty))
@@ -1282,12 +1875,12 @@ JL_DLLEXPORT jl_value_t *jl_checked_replace(jl_binding_t *b, jl_module_t *mod, j
JL_DLLEXPORT jl_value_t *jl_checked_modify(jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *op, jl_value_t *rhs)
{
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- assert(!jl_bkind_is_some_guard(decode_restriction_kind(pku)) && !jl_bkind_is_some_import(decode_restriction_kind(pku)));
- if (jl_bkind_is_some_constant(decode_restriction_kind(pku)))
- jl_errorf("invalid redefinition of constant %s.%s",
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ assert(!jl_bkind_is_some_guard(kind) && !jl_bkind_is_some_import(kind));
+ if (jl_bkind_is_some_constant(kind))
+ jl_errorf("invalid assignment to constant %s.%s",
jl_symbol_name(mod->name), jl_symbol_name(var));
- jl_value_t *ty = decode_restriction_value(pku);
+ jl_value_t *ty = bpart->restriction;
JL_GC_PROMISE_ROOTED(ty);
return modify_value(ty, &b->value, (jl_value_t*)b, op, rhs, 1, mod, var);
}
@@ -1334,12 +1927,12 @@ void append_module_names(jl_array_t* a, jl_module_t *m, int all, int imported, i
int hidden = jl_symbol_name(asname)[0]=='#';
int main_public = (m == jl_main_module && !(asname == jl_eval_sym || asname == jl_include_sym));
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- enum jl_partition_kind kind = decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction));
- if (((b->publicp) ||
- (imported && (kind == BINDING_KIND_CONST_IMPORT || kind == BINDING_KIND_IMPORTED)) ||
- (usings && kind == BINDING_KIND_EXPLICIT) ||
- ((kind == BINDING_KIND_GLOBAL || kind == BINDING_KIND_CONST || kind == BINDING_KIND_DECLARED) && (all || main_public))) &&
- (all || (!b->deprecated && !hidden)))
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ if (((jl_atomic_load_relaxed(&b->flags) & BINDING_FLAG_PUBLICP) ||
+ (imported && (kind == PARTITION_KIND_CONST_IMPORT || kind == PARTITION_KIND_IMPORTED)) ||
+ (usings && kind == PARTITION_KIND_EXPLICIT) ||
+ ((kind == PARTITION_KIND_GLOBAL || kind == PARTITION_KIND_CONST || kind == PARTITION_KIND_DECLARED) && (all || main_public))) &&
+ (all || (!(bpart->kind & PARTITION_FLAG_DEPRECATED) && !hidden)))
_append_symbol_to_bindings_array(a, asname);
}
}
@@ -1351,7 +1944,8 @@ void append_exported_names(jl_array_t* a, jl_module_t *m, int all)
jl_binding_t *b = (jl_binding_t*)jl_svecref(table, i);
if ((void*)b == jl_nothing)
break;
- if (b->exportp && (all || !b->deprecated))
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
+ if ((bpart->kind & PARTITION_FLAG_EXPORTED) && (all || !(bpart->kind & PARTITION_FLAG_DEPRECATED)))
_append_symbol_to_bindings_array(a, b->globalref->name);
}
}
@@ -1421,8 +2015,8 @@ JL_DLLEXPORT void jl_clear_implicit_imports(jl_module_t *m)
if ((void*)b == jl_nothing)
break;
jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- if (decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)) == BINDING_KIND_IMPLICIT) {
- jl_atomic_store_relaxed(&bpart->restriction, encode_restriction(NULL, BINDING_KIND_GUARD));
+ if (jl_bkind_is_some_implicit(jl_binding_kind(bpart))) {
+ jl_atomic_store_relaxed(&b->partitions, NULL);
}
}
JL_UNLOCK(&m->lock);
diff --git a/src/null_sysimage.c b/src/null_sysimage.c
new file mode 100644
index 0000000000000..386842f0c4e77
--- /dev/null
+++ b/src/null_sysimage.c
@@ -0,0 +1,15 @@
+// This file is a part of Julia. License is MIT: https://julialang.org/license
+
+#include <stddef.h>
+#include "processor.h"
+
+/**
+ * These symbols support statically linking the sysimage with libjulia-internal.
+ *
+ * Here we provide dummy definitions that are used when these are not linked
+ * together (the default build configuration). The 0 value of jl_system_image_size
+ * is used as a sentinel to indicate that the sysimage should be loaded externally.
+ **/
+char jl_system_image_data = 0;
+size_t jl_system_image_size = 0;
+jl_image_pointers_t jl_image_pointers = { 0 };
diff --git a/src/opaque_closure.c b/src/opaque_closure.c
index a10b5c617753c..2e39d5965b45a 100644
--- a/src/opaque_closure.c
+++ b/src/opaque_closure.c
@@ -159,7 +159,6 @@ JL_DLLEXPORT jl_opaque_closure_t *jl_new_opaque_closure_from_code_info(jl_tuplet
size_t world = jl_current_task->world_age;
// these are only legal in the current world since they are not in any tables
jl_atomic_store_release(&meth->primary_world, world);
- jl_atomic_store_release(&meth->deleted_world, world);
if (isinferred) {
jl_value_t *argslotty = jl_array_ptr_ref(ci->slottypes, 0);
diff --git a/src/options.h b/src/options.h
index 0715069faab32..fb2797ffd0336 100644
--- a/src/options.h
+++ b/src/options.h
@@ -144,6 +144,9 @@
#define MACHINE_EXCLUSIVE_NAME "JULIA_EXCLUSIVE"
#define DEFAULT_MACHINE_EXCLUSIVE 0
+// heartbeats
+#define JL_HEARTBEAT_THREAD
+
// sanitizer defaults ---------------------------------------------------------
// Automatically enable MEMDEBUG and KEEP_BODIES for the sanitizers
diff --git a/src/precompile_utils.c b/src/precompile_utils.c
index 8906b3eb586d3..295f91ad31e67 100644
--- a/src/precompile_utils.c
+++ b/src/precompile_utils.c
@@ -170,6 +170,10 @@ static void jl_compile_all_defs(jl_array_t *mis, int all)
size_t i, l = jl_array_nrows(allmeths);
for (i = 0; i < l; i++) {
jl_method_t *m = (jl_method_t*)jl_array_ptr_ref(allmeths, i);
+ int is_macro_method = jl_symbol_name(m->name)[0] == '@';
+ if (is_macro_method && !all)
+ continue; // Avoid inference / pre-compilation for macros
+
if (jl_is_datatype(m->sig) && jl_isa_compileable_sig((jl_tupletype_t*)m->sig, jl_emptysvec, m)) {
// method has a single compilable specialization, e.g. its definition
// signature is concrete. in this case we can just hint it.
@@ -271,7 +275,7 @@ static void *jl_precompile_(jl_array_t *m, int external_linkage)
}
else {
assert(jl_is_simplevector(item));
- assert(jl_svec_len(item) == 2);
+ assert(jl_svec_len(item) == 2 || jl_svec_len(item) == 3);
jl_array_ptr_1d_push(m2, item);
}
}
@@ -382,7 +386,18 @@ static void *jl_precompile_trimmed(size_t world)
jl_array_ptr_1d_push(m, ccallable);
}
- void *native_code = jl_create_native(m, NULL, jl_options.trim, 0, world);
+ void *native_code = NULL;
+ JL_TRY {
+ native_code = jl_create_native(m, NULL, jl_options.trim, 0, world);
+ } JL_CATCH {
+ jl_value_t *exc = jl_current_exception(jl_current_task);
+ if (!jl_isa(exc, (jl_value_t*)jl_trimfailure_type))
+ jl_rethrow(); // unexpected exception, expose the stacktrace
+
+ // The verification check failed. The error message should already have
+ // been printed, so give up here and exit (w/o a stack trace).
+ exit(1);
+ }
JL_GC_POP();
return native_code;
}
@@ -401,20 +416,21 @@ static void jl_rebuild_methtables(arraylist_t* MIs, htable_t* mtables)
ptrhash_put(mtables, old_mt, jl_new_method_table(name, m->module));
jl_methtable_t *mt = (jl_methtable_t*)ptrhash_get(mtables, old_mt);
size_t world = jl_atomic_load_acquire(&jl_world_counter);
- jl_value_t * lookup = jl_methtable_lookup(mt, m->sig, world);
+ jl_value_t *lookup = jl_methtable_lookup(mt, m->sig, world);
// Check if the method is already in the new table, if not then insert it there
if (lookup == jl_nothing || (jl_method_t*)lookup != m) {
//TODO: should this be a function like unsafe_insert_method?
size_t min_world = jl_atomic_load_relaxed(&m->primary_world);
- size_t max_world = jl_atomic_load_relaxed(&m->deleted_world);
+ size_t max_world = ~(size_t)0;
+ assert(min_world == jl_atomic_load_relaxed(&m->primary_world));
+ int dispatch_status = jl_atomic_load_relaxed(&m->dispatch_status);
jl_atomic_store_relaxed(&m->primary_world, ~(size_t)0);
- jl_atomic_store_relaxed(&m->deleted_world, 1);
+ jl_atomic_store_relaxed(&m->dispatch_status, 0);
jl_typemap_entry_t *newentry = jl_method_table_add(mt, m, NULL);
jl_atomic_store_relaxed(&m->primary_world, min_world);
- jl_atomic_store_relaxed(&m->deleted_world, max_world);
+ jl_atomic_store_relaxed(&m->dispatch_status, dispatch_status);
jl_atomic_store_relaxed(&newentry->min_world, min_world);
- jl_atomic_store_relaxed(&newentry->max_world, max_world);
+ jl_atomic_store_relaxed(&newentry->max_world, max_world); // short-circuit jl_method_table_insert
}
}
-
}
diff --git a/src/processor.cpp b/src/processor.cpp
index 3edebcc2f3ae6..6f95ee7f3790a 100644
--- a/src/processor.cpp
+++ b/src/processor.cpp
@@ -504,7 +504,8 @@ static inline llvm::SmallVector, 0>
parse_cmdline(const char *option, F &&feature_cb)
{
if (!option)
- option = "native";
+ abort();
+
llvm::SmallVector, 0> res;
TargetData arg{};
auto reset_arg = [&] {
@@ -612,36 +613,29 @@ parse_cmdline(const char *option, F &&feature_cb)
// Cached version of command line parsing
template
-static inline llvm::SmallVector, 0> &get_cmdline_targets(F &&feature_cb)
+static inline llvm::SmallVector, 0> &get_cmdline_targets(const char *cpu_target, F &&feature_cb)
{
static llvm::SmallVector, 0> targets =
- parse_cmdline(jl_options.cpu_target, std::forward(feature_cb));
+ parse_cmdline(cpu_target, std::forward(feature_cb));
return targets;
}
-extern "C" {
-void *image_pointers_unavailable;
-extern void * JL_WEAK_SYMBOL_OR_ALIAS_DEFAULT(image_pointers_unavailable) jl_image_pointers;
-}
-
// Load sysimg, use the `callback` for dispatch and perform all relocations
// for the selected target.
template
-static inline jl_image_t parse_sysimg(void *hdl, F &&callback)
+static inline jl_image_t parse_sysimg(jl_image_buf_t image, F &&callback, void *ctx)
{
JL_TIMING(LOAD_IMAGE, LOAD_Processor);
jl_image_t res{};
- const jl_image_pointers_t *pointers;
- if (hdl == jl_exe_handle && &jl_image_pointers != JL_WEAK_SYMBOL_DEFAULT(image_pointers_unavailable))
- pointers = (const jl_image_pointers_t *)&jl_image_pointers;
- else
- jl_dlsym(hdl, "jl_image_pointers", (void**)&pointers, 1);
+ if (image.kind != JL_IMAGE_KIND_SO)
+ return res;
+ const jl_image_pointers_t *pointers = (const jl_image_pointers_t *)image.pointers;
const void *ids = pointers->target_data;
jl_value_t* rejection_reason = nullptr;
JL_GC_PUSH1(&rejection_reason);
- uint32_t target_idx = callback(ids, &rejection_reason);
+ uint32_t target_idx = callback(ctx, ids, &rejection_reason);
if (target_idx == UINT32_MAX) {
jl_error(jl_string_ptr(rejection_reason));
}
@@ -799,17 +793,7 @@ static inline jl_image_t parse_sysimg(void *hdl, F &&callback)
res.fptrs.nclones = clones.size();
}
-#ifdef _OS_WINDOWS_
- res.base = (intptr_t)hdl;
-#else
- Dl_info dlinfo;
- if (dladdr((void*)pointers, &dlinfo) != 0) {
- res.base = (intptr_t)dlinfo.dli_fbase;
- }
- else {
- res.base = 0;
- }
-#endif
+ res.base = image.base;
{
void *pgcstack_func_slot = pointers->ptls->pgcstack_func_slot;
@@ -1029,7 +1013,7 @@ JL_DLLEXPORT jl_value_t *jl_get_cpu_features(void)
}
extern "C" JL_DLLEXPORT jl_value_t* jl_reflect_clone_targets() {
- auto specs = jl_get_llvm_clone_targets();
+ auto specs = jl_get_llvm_clone_targets(jl_options.cpu_target);
const uint32_t base_flags = 0;
llvm::SmallVector data;
auto push_i32 = [&] (uint32_t v) {
diff --git a/src/processor.h b/src/processor.h
index 82a1121aaf7c4..65b634fd0ba26 100644
--- a/src/processor.h
+++ b/src/processor.h
@@ -64,6 +64,7 @@ JL_DLLEXPORT int jl_test_cpu_feature(jl_cpu_feature_t feature);
static const uint32_t jl_sysimg_tag_mask = 0x80000000u;
static const uint32_t jl_sysimg_val_mask = ~((uint32_t)0x80000000u);
+// A parsed image file
typedef struct _jl_image_fptrs_t {
// number of functions
uint32_t nptrs;
@@ -82,14 +83,14 @@ typedef struct _jl_image_fptrs_t {
const uint32_t *clone_idxs;
} jl_image_fptrs_t;
-typedef struct {
+struct _jl_image_t {
uint64_t base;
const char *gvars_base;
const int32_t *gvars_offsets;
uint32_t ngvars;
jl_image_fptrs_t fptrs;
void **jl_small_typeof;
-} jl_image_t;
+};
// The header for each image
// Details important counts about the image
@@ -206,8 +207,8 @@ typedef struct {
*
* Return the data about the function pointers selected.
*/
-jl_image_t jl_init_processor_sysimg(void *hdl);
-jl_image_t jl_init_processor_pkgimg(void *hdl);
+jl_image_t jl_init_processor_sysimg(jl_image_buf_t image, const char *cpu_target);
+jl_image_t jl_init_processor_pkgimg(jl_image_buf_t image);
// Return the name of the host CPU as a julia string.
JL_DLLEXPORT jl_value_t *jl_get_cpu_name(void);
@@ -224,6 +225,18 @@ JL_DLLEXPORT int32_t jl_set_zero_subnormals(int8_t isZero);
JL_DLLEXPORT int32_t jl_get_zero_subnormals(void);
JL_DLLEXPORT int32_t jl_set_default_nans(int8_t isDefault);
JL_DLLEXPORT int32_t jl_get_default_nans(void);
+
+/**
+ * System image contents.
+ *
+ * These symbols are typically dummy values, unless statically linking
+ * libjulia-* and the sysimage together (see null_sysimage.c), in which
+ * case they allow accessing the local copy of the sysimage.
+ **/
+extern char jl_system_image_data;
+extern size_t jl_system_image_size;
+extern jl_image_pointers_t jl_image_pointers;
+
#ifdef __cplusplus
}
@@ -239,7 +252,7 @@ extern JL_DLLEXPORT bool jl_processor_print_help;
* If the detected/specified CPU name is not available on the LLVM version specified,
* a fallback CPU name will be used. Unsupported features will be ignored.
*/
-extern "C" JL_DLLEXPORT std::pair> jl_get_llvm_target(bool imaging, uint32_t &flags) JL_NOTSAFEPOINT;
+extern "C" JL_DLLEXPORT std::pair> jl_get_llvm_target(const char *cpu_target, bool imaging, uint32_t &flags) JL_NOTSAFEPOINT;
/**
* Returns the CPU name and feature string to be used by LLVM disassembler.
@@ -263,7 +276,7 @@ struct jl_target_spec_t {
/**
* Return the list of targets to clone
*/
-extern "C" JL_DLLEXPORT llvm::SmallVector jl_get_llvm_clone_targets(void) JL_NOTSAFEPOINT;
+extern "C" JL_DLLEXPORT llvm::SmallVector jl_get_llvm_clone_targets(const char *cpu_target) JL_NOTSAFEPOINT;
// NOLINTEND(clang-diagnostic-return-type-c-linkage)
struct FeatureName {
const char *name;
diff --git a/src/processor_arm.cpp b/src/processor_arm.cpp
index d28e527ed44e8..66704a718a14d 100644
--- a/src/processor_arm.cpp
+++ b/src/processor_arm.cpp
@@ -1519,7 +1519,7 @@ static inline void disable_depends(FeatureList &features)
::disable_depends(features, Feature::deps, sizeof(Feature::deps) / sizeof(FeatureDep));
}
-static const llvm::SmallVector, 0> &get_cmdline_targets(void)
+static const llvm::SmallVector, 0> &get_cmdline_targets(const char *cpu_target)
{
auto feature_cb = [] (const char *str, size_t len, FeatureList &list) {
#ifdef _CPU_AARCH64_
@@ -1536,7 +1536,7 @@ static const llvm::SmallVector, 0> &get_cmdline_targets(v
set_bit(list, fbit, true);
return true;
};
- auto &targets = ::get_cmdline_targets(feature_cb);
+ auto &targets = ::get_cmdline_targets(cpu_target, feature_cb);
for (auto &t: targets) {
if (auto nname = normalize_cpu_name(t.name)) {
t.name = nname;
@@ -1599,10 +1599,11 @@ static int max_vector_size(const FeatureList &features)
#endif
}
-static uint32_t sysimg_init_cb(const void *id, jl_value_t **rejection_reason)
+static uint32_t sysimg_init_cb(void *ctx, const void *id, jl_value_t **rejection_reason)
{
// First see what target is requested for the JIT.
- auto &cmdline = get_cmdline_targets();
+ const char *cpu_target = (const char *)ctx;
+ auto &cmdline = get_cmdline_targets(cpu_target);
TargetData target = arg_target_data(cmdline[0], true);
// Then find the best match in the sysimg
auto sysimg = deserialize_target_data((const uint8_t*)id);
@@ -1626,7 +1627,7 @@ static uint32_t sysimg_init_cb(const void *id, jl_value_t **rejection_reason)
return match.best_idx;
}
-static uint32_t pkgimg_init_cb(const void *id, jl_value_t **rejection_reason JL_REQUIRE_ROOTED_SLOT)
+static uint32_t pkgimg_init_cb(void *ctx, const void *id, jl_value_t **rejection_reason JL_REQUIRE_ROOTED_SLOT)
{
TargetData target = jit_targets.front();
auto pkgimg = deserialize_target_data((const uint8_t*)id);
@@ -1639,9 +1640,9 @@ static uint32_t pkgimg_init_cb(const void *id, jl_value_t **rejection_reason JL_
return match.best_idx;
}
-static void ensure_jit_target(bool imaging)
+static void ensure_jit_target(const char *cpu_target, bool imaging)
{
- auto &cmdline = get_cmdline_targets();
+ auto &cmdline = get_cmdline_targets(cpu_target);
check_cmdline(cmdline, imaging);
if (!jit_targets.empty())
return;
@@ -1852,36 +1853,36 @@ JL_DLLEXPORT jl_value_t *jl_cpu_has_fma(int bits)
#endif
}
-jl_image_t jl_init_processor_sysimg(void *hdl)
+jl_image_t jl_init_processor_sysimg(jl_image_buf_t image, const char *cpu_target)
{
if (!jit_targets.empty())
jl_error("JIT targets already initialized");
- return parse_sysimg(hdl, sysimg_init_cb);
+ return parse_sysimg(image, sysimg_init_cb, (void *)cpu_target);
}
-jl_image_t jl_init_processor_pkgimg(void *hdl)
+jl_image_t jl_init_processor_pkgimg(jl_image_buf_t image)
{
if (jit_targets.empty())
jl_error("JIT targets not initialized");
if (jit_targets.size() > 1)
jl_error("Expected only one JIT target");
- return parse_sysimg(hdl, pkgimg_init_cb);
+ return parse_sysimg(image, pkgimg_init_cb, NULL);
}
JL_DLLEXPORT jl_value_t* jl_check_pkgimage_clones(char *data)
{
jl_value_t *rejection_reason = NULL;
JL_GC_PUSH1(&rejection_reason);
- uint32_t match_idx = pkgimg_init_cb(data, &rejection_reason);
+ uint32_t match_idx = pkgimg_init_cb(NULL, data, &rejection_reason);
JL_GC_POP();
if (match_idx == UINT32_MAX)
return rejection_reason;
return jl_nothing;
}
-std::pair> jl_get_llvm_target(bool imaging, uint32_t &flags)
+std::pair> jl_get_llvm_target(const char *cpu_target, bool imaging, uint32_t &flags)
{
- ensure_jit_target(imaging);
+ ensure_jit_target(cpu_target, imaging);
flags = jit_targets[0].en.flags;
return get_llvm_target_vec(jit_targets[0]);
}
@@ -1900,10 +1901,10 @@ const std::pair &jl_get_llvm_disasm_target(void)
}
#ifndef __clang_gcanalyzer__
-llvm::SmallVector jl_get_llvm_clone_targets(void)
+llvm::SmallVector jl_get_llvm_clone_targets(const char *cpu_target)
{
- auto &cmdline = get_cmdline_targets();
+ auto &cmdline = get_cmdline_targets(cpu_target);
check_cmdline(cmdline, true);
llvm::SmallVector, 0> image_targets;
for (auto &arg: cmdline) {
diff --git a/src/processor_fallback.cpp b/src/processor_fallback.cpp
index f8d9eb9fd9e73..c8c8feb072345 100644
--- a/src/processor_fallback.cpp
+++ b/src/processor_fallback.cpp
@@ -13,12 +13,12 @@ static inline const std::string &host_cpu_name()
return name;
}
-static const llvm::SmallVector, 0> &get_cmdline_targets(void)
+static const llvm::SmallVector, 0> &get_cmdline_targets(const char *cpu_target)
{
auto feature_cb = [] (const char*, size_t, FeatureList<1>&) {
return false;
};
- return ::get_cmdline_targets<1>(feature_cb);
+ return ::get_cmdline_targets<1>(cpu_target, feature_cb);
}
static llvm::SmallVector, 0> jit_targets;
@@ -36,10 +36,11 @@ static TargetData<1> arg_target_data(const TargetData<1> &arg, bool require_host
return res;
}
-static uint32_t sysimg_init_cb(const void *id, jl_value_t **rejection_reason)
+static uint32_t sysimg_init_cb(void *ctx, const void *id, jl_value_t **rejection_reason)
{
// First see what target is requested for the JIT.
- auto &cmdline = get_cmdline_targets();
+ const char *cpu_target = (const char *)ctx;
+ auto &cmdline = get_cmdline_targets(cpu_target);
TargetData<1> target = arg_target_data(cmdline[0], true);
// Find the last name match or use the default one.
uint32_t best_idx = 0;
@@ -54,7 +55,7 @@ static uint32_t sysimg_init_cb(const void *id, jl_value_t **rejection_reason)
return best_idx;
}
-static uint32_t pkgimg_init_cb(const void *id, jl_value_t **rejection_reason)
+static uint32_t pkgimg_init_cb(void *ctx, const void *id, jl_value_t **rejection_reason)
{
TargetData<1> target = jit_targets.front();
// Find the last name match or use the default one.
@@ -70,9 +71,9 @@ static uint32_t pkgimg_init_cb(const void *id, jl_value_t **rejection_reason)
return best_idx;
}
-static void ensure_jit_target(bool imaging)
+static void ensure_jit_target(const char *cpu_target, bool imaging)
{
- auto &cmdline = get_cmdline_targets();
+ auto &cmdline = get_cmdline_targets(cpu_target);
check_cmdline(cmdline, imaging);
if (!jit_targets.empty())
return;
@@ -115,25 +116,25 @@ get_llvm_target_str(const TargetData<1> &data)
using namespace Fallback;
-jl_image_t jl_init_processor_sysimg(void *hdl)
+jl_image_t jl_init_processor_sysimg(jl_image_buf_t image, const char *cpu_target)
{
if (!jit_targets.empty())
jl_error("JIT targets already initialized");
- return parse_sysimg(hdl, sysimg_init_cb);
+ return parse_sysimg(image, sysimg_init_cb, (void *)cpu_target);
}
-jl_image_t jl_init_processor_pkgimg(void *hdl)
+jl_image_t jl_init_processor_pkgimg(jl_image_buf_t image)
{
if (jit_targets.empty())
jl_error("JIT targets not initialized");
if (jit_targets.size() > 1)
jl_error("Expected only one JIT target");
- return parse_sysimg(hdl, pkgimg_init_cb);
+ return parse_sysimg(image, pkgimg_init_cb, NULL);
}
-std::pair> jl_get_llvm_target(bool imaging, uint32_t &flags)
+std::pair> jl_get_llvm_target(const char *cpu_target, bool imaging, uint32_t &flags)
{
- ensure_jit_target(imaging);
+ ensure_jit_target(cpu_target, imaging);
flags = jit_targets[0].en.flags;
return get_llvm_target_vec(jit_targets[0]);
}
@@ -145,10 +146,10 @@ const std::pair &jl_get_llvm_disasm_target(void)
return res;
}
#ifndef __clang_gcanalyzer__
-llvm::SmallVector jl_get_llvm_clone_targets(void)
+llvm::SmallVector jl_get_llvm_clone_targets(const char *cpu_target)
{
- auto &cmdline = get_cmdline_targets();
+ auto &cmdline = get_cmdline_targets(cpu_target);
check_cmdline(cmdline, true);
llvm::SmallVector, 0> image_targets;
for (auto &arg: cmdline) {
@@ -192,7 +193,7 @@ JL_DLLEXPORT jl_value_t* jl_check_pkgimage_clones(char *data)
{
jl_value_t *rejection_reason = NULL;
JL_GC_PUSH1(&rejection_reason);
- uint32_t match_idx = pkgimg_init_cb(data, &rejection_reason);
+ uint32_t match_idx = pkgimg_init_cb(NULL, data, &rejection_reason);
JL_GC_POP();
if (match_idx == UINT32_MAX)
return rejection_reason;
diff --git a/src/processor_x86.cpp b/src/processor_x86.cpp
index bf765be160ed2..bd624943083ae 100644
--- a/src/processor_x86.cpp
+++ b/src/processor_x86.cpp
@@ -809,7 +809,7 @@ static inline void disable_depends(FeatureList &features)
::disable_depends(features, Feature::deps, sizeof(Feature::deps) / sizeof(FeatureDep));
}
-static const llvm::SmallVector, 0> &get_cmdline_targets(void)
+static const llvm::SmallVector, 0> &get_cmdline_targets(const char *cpu_target)
{
auto feature_cb = [] (const char *str, size_t len, FeatureList &list) {
auto fbit = find_feature_bit(feature_names, nfeature_names, str, len);
@@ -818,7 +818,7 @@ static const llvm::SmallVector, 0> &get_cmdline_targets(v
set_bit(list, fbit, true);
return true;
};
- auto &targets = ::get_cmdline_targets(feature_cb);
+ auto &targets = ::get_cmdline_targets(cpu_target, feature_cb);
for (auto &t: targets) {
if (auto nname = normalize_cpu_name(t.name)) {
t.name = nname;
@@ -878,10 +878,11 @@ static int max_vector_size(const FeatureList &features)
return 16;
}
-static uint32_t sysimg_init_cb(const void *id, jl_value_t** rejection_reason)
+static uint32_t sysimg_init_cb(void *ctx, const void *id, jl_value_t** rejection_reason)
{
// First see what target is requested for the JIT.
- auto &cmdline = get_cmdline_targets();
+ const char *cpu_target = (const char *)ctx;
+ auto &cmdline = get_cmdline_targets(cpu_target);
TargetData target = arg_target_data(cmdline[0], true);
// Then find the best match in the sysimg
auto sysimg = deserialize_target_data((const uint8_t*)id);
@@ -924,7 +925,7 @@ static uint32_t sysimg_init_cb(const void *id, jl_value_t** rejection_reason)
return match.best_idx;
}
-static uint32_t pkgimg_init_cb(const void *id, jl_value_t **rejection_reason)
+static uint32_t pkgimg_init_cb(void *ctx, const void *id, jl_value_t **rejection_reason)
{
TargetData target = jit_targets.front();
auto pkgimg = deserialize_target_data((const uint8_t*)id);
@@ -939,9 +940,9 @@ static uint32_t pkgimg_init_cb(const void *id, jl_value_t **rejection_reason)
//This function serves as a fallback during bootstrapping, at that point we don't have a sysimage with native code
// so we won't call sysimg_init_cb, else this function shouldn't do anything.
-static void ensure_jit_target(bool imaging)
+static void ensure_jit_target(const char *cpu_target, bool imaging)
{
- auto &cmdline = get_cmdline_targets();
+ auto &cmdline = get_cmdline_targets(cpu_target);
check_cmdline(cmdline, imaging);
if (!jit_targets.empty())
return;
@@ -1084,7 +1085,7 @@ JL_DLLEXPORT jl_value_t* jl_check_pkgimage_clones(char *data)
{
jl_value_t *rejection_reason = NULL;
JL_GC_PUSH1(&rejection_reason);
- uint32_t match_idx = pkgimg_init_cb(data, &rejection_reason);
+ uint32_t match_idx = pkgimg_init_cb(NULL, data, &rejection_reason);
JL_GC_POP();
if (match_idx == UINT32_MAX)
return rejection_reason;
@@ -1101,25 +1102,25 @@ JL_DLLEXPORT jl_value_t *jl_cpu_has_fma(int bits)
return jl_false;
}
-jl_image_t jl_init_processor_sysimg(void *hdl)
+jl_image_t jl_init_processor_sysimg(jl_image_buf_t image, const char *cpu_target)
{
if (!jit_targets.empty())
jl_error("JIT targets already initialized");
- return parse_sysimg(hdl, sysimg_init_cb);
+ return parse_sysimg(image, sysimg_init_cb, (void *)cpu_target);
}
-jl_image_t jl_init_processor_pkgimg(void *hdl)
+jl_image_t jl_init_processor_pkgimg(jl_image_buf_t image)
{
if (jit_targets.empty())
jl_error("JIT targets not initialized");
if (jit_targets.size() > 1)
jl_error("Expected only one JIT target");
- return parse_sysimg(hdl, pkgimg_init_cb);
+ return parse_sysimg(image, pkgimg_init_cb, NULL);
}
-std::pair> jl_get_llvm_target(bool imaging, uint32_t &flags)
+std::pair> jl_get_llvm_target(const char *cpu_target, bool imaging, uint32_t &flags)
{
- ensure_jit_target(imaging);
+ ensure_jit_target(cpu_target, imaging);
flags = jit_targets[0].en.flags;
return get_llvm_target_vec(jit_targets[0]);
}
@@ -1132,9 +1133,10 @@ const std::pair &jl_get_llvm_disasm_target(void)
}
//This function parses the -C command line to figure out which targets to multiversion to.
#ifndef __clang_gcanalyzer__
-llvm::SmallVector jl_get_llvm_clone_targets(void)
+llvm::SmallVector jl_get_llvm_clone_targets(const char *cpu_target)
{
- auto &cmdline = get_cmdline_targets();
+
+ auto &cmdline = get_cmdline_targets(cpu_target);
check_cmdline(cmdline, true);
llvm::SmallVector, 0> image_targets;
for (auto &arg: cmdline) {
diff --git a/src/rtutils.c b/src/rtutils.c
index 6515b80c5d2b5..5966497ec331c 100644
--- a/src/rtutils.c
+++ b/src/rtutils.c
@@ -579,7 +579,7 @@ JL_DLLEXPORT jl_value_t *jl_stderr_obj(void) JL_NOTSAFEPOINT
if (jl_base_module == NULL)
return NULL;
jl_binding_t *stderr_obj = jl_get_module_binding(jl_base_module, jl_symbol("stderr"), 0);
- return stderr_obj ? jl_get_binding_value_if_resolved(stderr_obj) : NULL;
+ return stderr_obj ? jl_get_binding_value_if_resolved_debug_only(stderr_obj) : NULL;
}
// toys for debugging ---------------------------------------------------------
@@ -674,9 +674,8 @@ static int is_globname_binding(jl_value_t *v, jl_datatype_t *dv) JL_NOTSAFEPOINT
jl_sym_t *globname = dv->name->mt != NULL ? dv->name->mt->name : NULL;
if (globname && dv->name->module) {
jl_binding_t *b = jl_get_module_binding(dv->name->module, globname, 0);
- jl_value_t *bv = jl_get_binding_value_if_resolved_and_const(b);
- // The `||` makes this function work for both function instances and function types.
- if (bv && (bv == v || jl_typeof(bv) == v))
+ jl_value_t *bv = jl_get_binding_value_if_latest_resolved_and_const_debug_only(b);
+ if (bv && ((jl_value_t*)dv == v ? jl_typeof(bv) == v : bv == v))
return 1;
}
return 0;
diff --git a/src/runtime_ccall.cpp b/src/runtime_ccall.cpp
index 2a6cb00961594..dd5ceb2c6ad90 100644
--- a/src/runtime_ccall.cpp
+++ b/src/runtime_ccall.cpp
@@ -326,6 +326,136 @@ jl_value_t *jl_get_cfunction_trampoline(
}
JL_GCC_IGNORE_STOP
+struct cfuncdata_t {
+ jl_code_instance_t** plast_codeinst;
+ jl_code_instance_t* last_codeinst;
+ void *unspecialized;
+ jl_value_t *const *const declrt;
+ jl_value_t *const *const sigt;
+ size_t flags;
+};
+
+extern "C" JL_DLLEXPORT
+void *jl_jit_abi_converter_fallback(jl_task_t *ct, void *unspecialized, jl_value_t *declrt, jl_value_t *sigt, size_t nargs, int specsig,
+ jl_code_instance_t *codeinst, jl_callptr_t invoke, void *target, int target_specsig)
+{
+ if (unspecialized)
+ return unspecialized;
+ jl_errorf("cfunction not available in this build of Julia");
+}
+
+static const inline char *name_from_method_instance(jl_method_instance_t *li) JL_NOTSAFEPOINT
+{
+ return jl_is_method(li->def.method) ? jl_symbol_name(li->def.method->name) : "top-level scope";
+}
+
+static jl_mutex_t cfun_lock;
+// release jl_world_counter
+// store theFptr
+// release last_world_v
+//
+// acquire last_world_v
+// read theFptr
+// acquire jl_world_counter
+extern "C" JL_DLLEXPORT
+void *jl_get_abi_converter(jl_task_t *ct, _Atomic(void*) *fptr, _Atomic(size_t) *last_world, void *data)
+{
+ cfuncdata_t *cfuncdata = (cfuncdata_t*)data;
+ jl_value_t *sigt = *cfuncdata->sigt;
+ JL_GC_PROMISE_ROOTED(sigt);
+ jl_value_t *declrt = *cfuncdata->declrt;
+ JL_GC_PROMISE_ROOTED(declrt);
+ bool specsig = cfuncdata->flags & 1;
+ size_t nargs = jl_nparams(sigt);
+ jl_method_instance_t *mi;
+ jl_code_instance_t *codeinst;
+ size_t world;
+ // check first, while behind this lock, of the validity of the current contents of this cfunc thunk
+ JL_LOCK(&cfun_lock);
+ do {
+ size_t last_world_v = jl_atomic_load_relaxed(last_world);
+ void *f = jl_atomic_load_relaxed(fptr);
+ jl_code_instance_t *last_ci = cfuncdata->plast_codeinst ? *cfuncdata->plast_codeinst : nullptr;
+ world = jl_atomic_load_acquire(&jl_world_counter);
+ ct->world_age = world;
+ if (world == last_world_v) {
+ JL_UNLOCK(&cfun_lock);
+ return f;
+ }
+ mi = jl_get_specialization1((jl_tupletype_t*)sigt, world, 0);
+ if (f != nullptr) {
+ if (last_ci == nullptr) {
+ if (mi == nullptr) {
+ jl_atomic_store_release(last_world, world);
+ JL_UNLOCK(&cfun_lock);
+ return f;
+ }
+ }
+ else {
+ if (jl_get_ci_mi(last_ci) == mi && jl_atomic_load_relaxed(&last_ci->max_world) >= world) { // same dispatch and source
+ jl_atomic_store_release(last_world, world);
+ JL_UNLOCK(&cfun_lock);
+ return f;
+ }
+ }
+ }
+ JL_UNLOCK(&cfun_lock);
+ // next, try to figure out what the target should look like (outside of the lock since this is very slow)
+ codeinst = mi ? jl_type_infer(mi, world, SOURCE_MODE_ABI) : nullptr;
+ // relock for the remainder of the function
+ JL_LOCK(&cfun_lock);
+ } while (jl_atomic_load_acquire(&jl_world_counter) != world); // restart entirely, since jl_world_counter changed thus jl_get_specialization1 might have changed
+ // double-check if the values were set on another thread
+ size_t last_world_v = jl_atomic_load_relaxed(last_world);
+ void *f = jl_atomic_load_relaxed(fptr);
+ if (world == last_world_v) {
+ JL_UNLOCK(&cfun_lock);
+ return f; // another thread fixed this up while we were away
+ }
+ auto assign_fptr = [fptr, last_world, cfuncdata, world, codeinst](void *f) {
+ cfuncdata->plast_codeinst = &cfuncdata->last_codeinst;
+ cfuncdata->last_codeinst = codeinst;
+ jl_atomic_store_relaxed(fptr, f);
+ jl_atomic_store_release(last_world, world);
+ JL_UNLOCK(&cfun_lock);
+ return f;
+ };
+ jl_callptr_t invoke = nullptr;
+ if (codeinst != NULL) {
+ jl_value_t *astrt = codeinst->rettype;
+ if (astrt != (jl_value_t*)jl_bottom_type &&
+ jl_type_intersection(astrt, declrt) == jl_bottom_type) {
+ // Do not warn if the function never returns since it is
+ // occasionally required by the C API (typically error callbacks)
+ // even though we're likely to encounter memory errors in that case
+ jl_printf(JL_STDERR, "WARNING: cfunction: return type of %s does not match\n", name_from_method_instance(mi));
+ }
+ uint8_t specsigflags;
+ jl_read_codeinst_invoke(codeinst, &specsigflags, &invoke, &f, 1);
+ if (invoke != nullptr) {
+ if (invoke == jl_fptr_const_return_addr) {
+ return assign_fptr(jl_jit_abi_converter(ct, cfuncdata->unspecialized, declrt, sigt, nargs, specsig, codeinst, invoke, nullptr, false));
+ }
+ else if (invoke == jl_fptr_args_addr) {
+ assert(f);
+ if (!specsig && jl_subtype(astrt, declrt))
+ return assign_fptr(f);
+ return assign_fptr(jl_jit_abi_converter(ct, cfuncdata->unspecialized, declrt, sigt, nargs, specsig, codeinst, invoke, f, false));
+ }
+ else if (specsigflags & 0b1) {
+ assert(f);
+ if (specsig && jl_egal(mi->specTypes, sigt) && jl_egal(declrt, astrt))
+ return assign_fptr(f);
+ return assign_fptr(jl_jit_abi_converter(ct, cfuncdata->unspecialized, declrt, sigt, nargs, specsig, codeinst, invoke, f, true));
+ }
+ }
+ }
+ f = jl_jit_abi_converter(ct, cfuncdata->unspecialized, declrt, sigt, nargs, specsig, codeinst, invoke, nullptr, false);
+ if (codeinst == nullptr)
+ cfuncdata->unspecialized = f;
+ return assign_fptr(f);
+}
+
void jl_init_runtime_ccall(void)
{
JL_MUTEX_INIT(&libmap_lock, "libmap_lock");
diff --git a/src/runtime_intrinsics.c b/src/runtime_intrinsics.c
index f5b281f9e92ed..2671bebfd7f55 100644
--- a/src/runtime_intrinsics.c
+++ b/src/runtime_intrinsics.c
@@ -1073,31 +1073,26 @@ typedef void (fintrinsic_op1)(unsigned, jl_value_t*, void*, void*);
static inline jl_value_t *jl_fintrinsic_1(jl_value_t *ty, jl_value_t *a, const char *name, fintrinsic_op1 *bfloatop, fintrinsic_op1 *halfop, fintrinsic_op1 *floatop, fintrinsic_op1 *doubleop)
{
jl_task_t *ct = jl_current_task;
- if (!jl_is_primitivetype(jl_typeof(a)))
+ jl_datatype_t *aty = (jl_datatype_t *)jl_typeof(a);
+ if (!jl_is_primitivetype(aty))
jl_errorf("%s: value is not a primitive type", name);
if (!jl_is_primitivetype(ty))
jl_errorf("%s: type is not a primitive type", name);
unsigned sz2 = jl_datatype_size(ty);
jl_value_t *newv = jl_gc_alloc(ct->ptls, sz2, ty);
void *pa = jl_data_ptr(a), *pr = jl_data_ptr(newv);
- unsigned sz = jl_datatype_size(jl_typeof(a));
- switch (sz) {
- /* choose the right size c-type operation based on the input */
- case 2:
- if (jl_typeof(a) == (jl_value_t*)jl_float16_type)
- halfop(sz2 * host_char_bit, ty, pa, pr);
- else /*if (jl_typeof(a) == (jl_value_t*)jl_bfloat16_type)*/
- bfloatop(sz2 * host_char_bit, ty, pa, pr);
- break;
- case 4:
+
+ if (aty == jl_float16_type)
+ halfop(sz2 * host_char_bit, ty, pa, pr);
+ else if (aty == jl_bfloat16_type)
+ bfloatop(sz2 * host_char_bit, ty, pa, pr);
+ else if (aty == jl_float32_type)
floatop(sz2 * host_char_bit, ty, pa, pr);
- break;
- case 8:
+ else if (aty == jl_float64_type)
doubleop(sz2 * host_char_bit, ty, pa, pr);
- break;
- default:
- jl_errorf("%s: runtime floating point intrinsics are not implemented for bit sizes other than 16, 32 and 64", name);
- }
+ else
+ jl_errorf("%s: runtime floating point intrinsics require both arguments to be Float16, BFloat16, Float32, or Float64", name);
+
return newv;
}
@@ -1273,6 +1268,7 @@ JL_DLLEXPORT jl_value_t *jl_##name(jl_value_t *a, jl_value_t *b) \
{ \
jl_task_t *ct = jl_current_task; \
jl_value_t *ty = jl_typeof(a); \
+ jl_datatype_t *aty = (jl_datatype_t *)ty; \
if (jl_typeof(b) != ty) \
jl_error(#name ": types of a and b must match"); \
if (!jl_is_primitivetype(ty)) \
@@ -1280,23 +1276,16 @@ JL_DLLEXPORT jl_value_t *jl_##name(jl_value_t *a, jl_value_t *b) \
int sz = jl_datatype_size(ty); \
jl_value_t *newv = jl_gc_alloc(ct->ptls, sz, ty); \
void *pa = jl_data_ptr(a), *pb = jl_data_ptr(b), *pr = jl_data_ptr(newv); \
- switch (sz) { \
- /* choose the right size c-type operation */ \
- case 2: \
- if ((jl_datatype_t*)ty == jl_float16_type) \
- jl_##name##16(16, pa, pb, pr); \
- else /*if ((jl_datatype_t*)ty == jl_bfloat16_type)*/ \
- jl_##name##bf16(16, pa, pb, pr); \
- break; \
- case 4: \
+ if (aty == jl_float16_type) \
+ jl_##name##16(16, pa, pb, pr); \
+ else if (aty == jl_bfloat16_type) \
+ jl_##name##bf16(16, pa, pb, pr); \
+ else if (aty == jl_float32_type) \
jl_##name##32(32, pa, pb, pr); \
- break; \
- case 8: \
+ else if (aty == jl_float64_type) \
jl_##name##64(64, pa, pb, pr); \
- break; \
- default: \
- jl_error(#name ": runtime floating point intrinsics are not implemented for bit sizes other than 16, 32 and 64"); \
- } \
+ else \
+ jl_error(#name ": runtime floating point intrinsics require both arguments to be Float16, BFloat16, Float32, or Float64"); \
return newv; \
}
@@ -1308,30 +1297,24 @@ JL_DLLEXPORT jl_value_t *jl_##name(jl_value_t *a, jl_value_t *b) \
JL_DLLEXPORT jl_value_t *jl_##name(jl_value_t *a, jl_value_t *b) \
{ \
jl_value_t *ty = jl_typeof(a); \
+ jl_datatype_t *aty = (jl_datatype_t *)ty; \
if (jl_typeof(b) != ty) \
jl_error(#name ": types of a and b must match"); \
if (!jl_is_primitivetype(ty)) \
jl_error(#name ": values are not primitive types"); \
void *pa = jl_data_ptr(a), *pb = jl_data_ptr(b); \
- int sz = jl_datatype_size(ty); \
int cmp; \
- switch (sz) { \
- /* choose the right size c-type operation */ \
- case 2: \
- if ((jl_datatype_t*)ty == jl_float16_type) \
- cmp = jl_##name##16(16, pa, pb); \
- else /*if ((jl_datatype_t*)ty == jl_bfloat16_type)*/ \
- cmp = jl_##name##bf16(16, pa, pb); \
- break; \
- case 4: \
+ if (aty == jl_float16_type) \
+ cmp = jl_##name##16(16, pa, pb); \
+ else if (aty == jl_bfloat16_type) \
+ cmp = jl_##name##bf16(16, pa, pb); \
+ else if (aty == jl_float32_type) \
cmp = jl_##name##32(32, pa, pb); \
- break; \
- case 8: \
+ else if (aty == jl_float64_type) \
cmp = jl_##name##64(64, pa, pb); \
- break; \
- default: \
- jl_error(#name ": runtime floating point intrinsics are not implemented for bit sizes other than 32 and 64"); \
- } \
+ else \
+ jl_error(#name ": runtime floating point intrinsics require both arguments to be Float16, BFloat16, Float32, or Float64"); \
+ \
return cmp ? jl_true : jl_false; \
}
@@ -1344,6 +1327,7 @@ JL_DLLEXPORT jl_value_t *jl_##name(jl_value_t *a, jl_value_t *b, jl_value_t *c)
{ \
jl_task_t *ct = jl_current_task; \
jl_value_t *ty = jl_typeof(a); \
+ jl_datatype_t *aty = (jl_datatype_t *)ty; \
if (jl_typeof(b) != ty || jl_typeof(c) != ty) \
jl_error(#name ": types of a, b, and c must match"); \
if (!jl_is_primitivetype(ty)) \
@@ -1351,23 +1335,16 @@ JL_DLLEXPORT jl_value_t *jl_##name(jl_value_t *a, jl_value_t *b, jl_value_t *c)
int sz = jl_datatype_size(ty); \
jl_value_t *newv = jl_gc_alloc(ct->ptls, sz, ty); \
void *pa = jl_data_ptr(a), *pb = jl_data_ptr(b), *pc = jl_data_ptr(c), *pr = jl_data_ptr(newv); \
- switch (sz) { \
- /* choose the right size c-type operation */ \
- case 2: \
- if ((jl_datatype_t*)ty == jl_float16_type) \
+ if (aty == jl_float16_type) \
jl_##name##16(16, pa, pb, pc, pr); \
- else /*if ((jl_datatype_t*)ty == jl_bfloat16_type)*/ \
+ else if (aty == jl_bfloat16_type) \
jl_##name##bf16(16, pa, pb, pc, pr); \
- break; \
- case 4: \
+ else if (aty == jl_float32_type) \
jl_##name##32(32, pa, pb, pc, pr); \
- break; \
- case 8: \
+ else if (aty == jl_float64_type) \
jl_##name##64(64, pa, pb, pc, pr); \
- break; \
- default: \
- jl_error(#name ": runtime floating point intrinsics are not implemented for bit sizes other than 16, 32 and 64"); \
- } \
+ else \
+ jl_error(#name ": runtime floating point intrinsics require both arguments to be Float16, BFloat16, Float32, or Float64"); \
return newv; \
}
@@ -1629,8 +1606,8 @@ cvt_iintrinsic(LLVMFPtoUI, fptoui)
#define fintrinsic_read_float32(p) *(float *)p
#define fintrinsic_read_float64(p) *(double *)p
-#define fintrinsic_write_float16(p, x) *(uint16_t *)p = float_to_half(x)
-#define fintrinsic_write_bfloat16(p, x) *(uint16_t *)p = float_to_bfloat(x)
+#define fintrinsic_write_float16(p, x) *(uint16_t *)p = double_to_half(x)
+#define fintrinsic_write_bfloat16(p, x) *(uint16_t *)p = double_to_bfloat(x)
#define fintrinsic_write_float32(p, x) *(float *)p = x
#define fintrinsic_write_float64(p, x) *(double *)p = x
@@ -1661,7 +1638,7 @@ static inline void fptrunc(jl_datatype_t *aty, void *pa, jl_datatype_t *ty, void
fptrunc_convert(float64, bfloat16);
fptrunc_convert(float64, float32);
else
- jl_error("fptrunc: runtime floating point intrinsics are not implemented for bit sizes other than 16, 32 and 64");
+ jl_error("fptrunc: runtime floating point intrinsics require both arguments to be Float16, BFloat16, Float32, or Float64");
#undef fptrunc_convert
}
@@ -1685,7 +1662,7 @@ static inline void fpext(jl_datatype_t *aty, void *pa, jl_datatype_t *ty, void *
fpext_convert(bfloat16, float64);
fpext_convert(float32, float64);
else
- jl_error("fptrunc: runtime floating point intrinsics are not implemented for bit sizes other than 16, 32 and 64");
+ jl_error("fpext: runtime floating point intrinsics require both arguments to be Float16, BFloat16, Float32, or Float64");
#undef fpext_convert
}
diff --git a/src/safepoint.c b/src/safepoint.c
index 66bea539861f8..96da3c1a05eb1 100644
--- a/src/safepoint.c
+++ b/src/safepoint.c
@@ -157,7 +157,7 @@ void jl_gc_wait_for_the_world(jl_ptls_t* gc_all_tls_states, int gc_n_threads)
uv_mutex_unlock(&safepoint_lock);
}
else {
- const int64_t timeout = jl_options.timeout_for_safepoint_straggler_s * 1000000000; // convert to nanoseconds
+ const int64_t timeout = jl_options.timeout_for_safepoint_straggler_s * 1000000000LL; // convert to nanoseconds
int ret = 0;
uv_mutex_lock(&safepoint_lock);
if (!jl_atomic_load_relaxed(&ptls2->gc_state)) {
@@ -172,7 +172,7 @@ void jl_gc_wait_for_the_world(jl_ptls_t* gc_all_tls_states, int gc_n_threads)
size_t bt_size = jl_try_record_thread_backtrace(ptls2, ptls->bt_data, JL_MAX_BT_SIZE);
// Print the backtrace of the straggler
for (size_t i = 0; i < bt_size; i += jl_bt_entry_size(ptls->bt_data + i)) {
- jl_print_bt_entry_codeloc(ptls->bt_data + i);
+ jl_print_bt_entry_codeloc(-1, ptls->bt_data + i);
}
}
}
diff --git a/src/signal-handling.c b/src/signal-handling.c
index ff073cc82a0a5..12e04a8e45b54 100644
--- a/src/signal-handling.c
+++ b/src/signal-handling.c
@@ -30,6 +30,8 @@ static const uint64_t GIGA = 1000000000ULL;
// Timers to take samples at intervals
JL_DLLEXPORT void jl_profile_stop_timer(void);
JL_DLLEXPORT int jl_profile_start_timer(uint8_t);
+// File-descriptor for safe logging on signal handling
+int jl_sig_fd;
///////////////////////
// Utility functions //
@@ -613,7 +615,7 @@ void jl_critical_error(int sig, int si_code, bt_context_t *context, jl_task_t *c
*bt_size = n = rec_backtrace_ctx(bt_data, JL_MAX_BT_SIZE, context, NULL);
}
for (i = 0; i < n; i += jl_bt_entry_size(bt_data + i)) {
- jl_print_bt_entry_codeloc(bt_data + i);
+ jl_print_bt_entry_codeloc(sig, bt_data + i);
}
jl_gc_debug_print_status();
jl_gc_debug_critical_error();
diff --git a/src/signals-mach.c b/src/signals-mach.c
index 1c4af2cf9d033..62f493d8264ec 100644
--- a/src/signals-mach.c
+++ b/src/signals-mach.c
@@ -826,6 +826,10 @@ void *mach_profile_listener(void *arg)
for (int idx = nthreads; idx-- > 0; ) {
// Stop the threads in random order.
int i = randperm[idx];
+ // skip heartbeat thread
+ if (i == heartbeat_tid) {
+ continue;
+ }
jl_profile_thread_mach(i);
}
}
diff --git a/src/signals-unix.c b/src/signals-unix.c
index c730f27f16def..ed186d5b357cd 100644
--- a/src/signals-unix.c
+++ b/src/signals-unix.c
@@ -42,7 +42,7 @@
#endif
// 8M signal stack, same as default stack size (though we barely use this)
-static const size_t sig_stack_size = 8 * 1024 * 1024;
+const size_t sig_stack_size = 8 * 1024 * 1024;
#include "julia_assert.h"
@@ -102,14 +102,6 @@ static inline uintptr_t jl_get_rsp_from_ctx(const void *_ctx)
#endif
}
-static int is_addr_on_sigstack(jl_ptls_t ptls, void *ptr) JL_NOTSAFEPOINT
-{
- // One guard page for signal_stack.
- return ptls->signal_stack == NULL ||
- ((char*)ptr >= (char*)ptls->signal_stack - jl_page_size &&
- (char*)ptr <= (char*)ptls->signal_stack + (ptls->signal_stack_size ? ptls->signal_stack_size : sig_stack_size));
-}
-
// Modify signal context `_ctx` so that `fptr` will execute when the signal returns
// The function `fptr` itself must not return.
JL_NO_ASAN static void jl_call_in_ctx(jl_ptls_t ptls, void (*fptr)(void), int sig, void *_ctx)
@@ -310,14 +302,34 @@ int exc_reg_is_write_fault(uintptr_t esr) {
#include
#include
+#ifndef _OS_FREEBSD_
+typedef struct {
+ void (*f)(void*) JL_NOTSAFEPOINT;
+ void *ctx;
+} callback_t;
+static int with_dl_iterate_phdr_lock(struct dl_phdr_info *info, size_t size, void *data)
+{
+ jl_lock_profile();
+ callback_t *callback = (callback_t*)data;
+ callback->f(callback->ctx);
+ jl_unlock_profile();
+ return 1; // only call this once
+}
+#endif
+
void jl_with_stackwalk_lock(void (*f)(void*), void *ctx)
{
- sigset_t sset, oset;
- sigemptyset(&sset);
- sigaddset(&sset, SIGUSR2);
- pthread_sigmask(SIG_BLOCK, &sset, &oset);
+#ifndef _OS_FREEBSD_
+ callback_t callback = {f, ctx};
+ dl_iterate_phdr(with_dl_iterate_phdr_lock, &callback);
+#else
+ // FreeBSD makes the questionable decisions to use a terrible implementation of a spin
+ // lock and to block all signals while a lock is held. However, that also means it is
+ // not currently vulnerable to this libunwind bug that other platforms can encounter.
+ jl_lock_profile();
f(ctx);
- pthread_sigmask(SIG_SETMASK, &oset, NULL);
+ jl_unlock_profile();
+#endif
}
#if defined(_OS_LINUX_) && (defined(_CPU_X86_64_) || defined(_CPU_X86_))
@@ -539,6 +551,7 @@ static int thread0_exit_signo = 0;
static void JL_NORETURN jl_exit_thread0_cb(void)
{
CFI_NORETURN
+ jl_atomic_fetch_add(&jl_gc_disable_counter, -1);
jl_critical_error(thread0_exit_signo, 0, NULL, jl_current_task);
jl_atexit_hook(128);
jl_raise(thread0_exit_signo);
@@ -852,6 +865,10 @@ static void do_profile(void *ctx)
for (int idx = nthreads; idx-- > 0; ) {
// Stop the threads in the random order.
int tid = randperm[idx];
+ // skip heartbeat thread
+ if (tid == heartbeat_tid) {
+ continue;
+ }
// do backtrace for profiler
if (!profile_running)
return;
@@ -1069,6 +1086,10 @@ static void *signal_listener(void *arg)
//#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 199309L && !HAVE_KEVENT
// si_code = info.si_code;
//#endif
+ // Let's forbid threads from running GC while we're trying to exit,
+ // also let's make sure we're not in the middle of GC.
+ jl_atomic_fetch_add(&jl_gc_disable_counter, 1);
+ jl_safepoint_wait_gc(NULL);
jl_exit_thread0(sig, signal_bt_data, signal_bt_size);
}
else if (critical) {
@@ -1086,7 +1107,7 @@ static void *signal_listener(void *arg)
jl_safe_printf("\nsignal (%d): %s\n", sig, strsignal(sig));
size_t i;
for (i = 0; i < signal_bt_size; i += jl_bt_entry_size(signal_bt_data + i)) {
- jl_print_bt_entry_codeloc(signal_bt_data + i);
+ jl_print_bt_entry_codeloc(sig, signal_bt_data + i);
}
}
}
diff --git a/src/signals-win.c b/src/signals-win.c
index c8ae74f52dba4..d9c7ffd5ae769 100644
--- a/src/signals-win.c
+++ b/src/signals-win.c
@@ -4,7 +4,7 @@
// Note that this file is `#include`d by "signal-handling.c"
#include // hidden by LEAN_AND_MEAN
-static const size_t sig_stack_size = 131072; // 128k reserved for backtrace_fiber for stack overflow handling
+const size_t sig_stack_size = 131072; // 128k reserved for backtrace_fiber for stack overflow handling
// Copied from MINGW_FLOAT_H which may not be found due to a collision with the builtin gcc float.h
// eventually we can probably integrate this into OpenLibm.
@@ -333,7 +333,7 @@ LONG WINAPI jl_exception_handler(struct _EXCEPTION_POINTERS *ExceptionInfo)
jl_safe_printf("UNKNOWN"); break;
}
jl_safe_printf(" at 0x%zx -- ", (size_t)ExceptionInfo->ExceptionRecord->ExceptionAddress);
- jl_print_native_codeloc((uintptr_t)ExceptionInfo->ExceptionRecord->ExceptionAddress);
+ jl_print_native_codeloc("", (uintptr_t)ExceptionInfo->ExceptionRecord->ExceptionAddress);
jl_critical_error(0, 0, ExceptionInfo->ContextRecord, ct);
static int recursion = 0;
diff --git a/src/simplevector.c b/src/simplevector.c
index 5f1fd744abd0c..d5b254f4a4409 100644
--- a/src/simplevector.c
+++ b/src/simplevector.c
@@ -56,6 +56,19 @@ JL_DLLEXPORT jl_svec_t *jl_svec2(void *a, void *b)
return v;
}
+JL_DLLEXPORT jl_svec_t *jl_svec3(void *a, void *b, void *c)
+{
+ jl_task_t *ct = jl_current_task;
+ jl_svec_t *v = (jl_svec_t*)jl_gc_alloc(ct->ptls, sizeof(void*) * 4,
+ jl_simplevector_type);
+ jl_set_typetagof(v, jl_simplevector_tag, 0);
+ jl_svec_set_len_unsafe(v, 3);
+ jl_svec_data(v)[0] = (jl_value_t*)a;
+ jl_svec_data(v)[1] = (jl_value_t*)b;
+ jl_svec_data(v)[2] = (jl_value_t*)c;
+ return v;
+}
+
JL_DLLEXPORT jl_svec_t *jl_alloc_svec_uninit(size_t n)
{
jl_task_t *ct = jl_current_task;
diff --git a/src/stackwalk.c b/src/stackwalk.c
index 14dc5709671dc..8a30b7f1ac7fd 100644
--- a/src/stackwalk.c
+++ b/src/stackwalk.c
@@ -98,9 +98,13 @@ static int jl_unw_stepn(bt_cursor_t *cursor, jl_bt_element_t *bt_data, size_t *b
}
uintptr_t oldsp = thesp;
have_more_frames = jl_unw_step(cursor, from_signal_handler, &return_ip, &thesp);
- if (oldsp >= thesp && !jl_running_under_rr(0)) {
- // The stack pointer is clearly bad, as it must grow downwards.
+ if ((n < 2 ? oldsp > thesp : oldsp >= thesp) && !jl_running_under_rr(0)) {
+ // The stack pointer is clearly bad, as it must grow downwards,
// But sometimes the external unwinder doesn't check that.
+ // Except for n==0 when there is no oldsp and n==1 on all platforms but i686/x86_64.
+ // (on x86, the platform first pushes the new stack frame, then does the
+ // call, on almost all other platforms, the platform first does the call,
+ // then the user pushes the link register to the frame).
have_more_frames = 0;
}
if (return_ip == 0) {
@@ -132,11 +136,11 @@ static int jl_unw_stepn(bt_cursor_t *cursor, jl_bt_element_t *bt_data, size_t *b
// * The way that libunwind handles it in `unw_get_proc_name`:
// https://lists.nongnu.org/archive/html/libunwind-devel/2014-06/msg00025.html
uintptr_t call_ip = return_ip;
+ #if defined(_CPU_ARM_)
// ARM instruction pointer encoding uses the low bit as a flag for
// thumb mode, which must be cleared before further use. (Note not
// needed for ARM AArch64.) See
// https://github.com/libunwind/libunwind/pull/131
- #ifdef _CPU_ARM_
call_ip &= ~(uintptr_t)0x1;
#endif
// Now there's two main cases to adjust for:
@@ -633,22 +637,25 @@ JL_DLLEXPORT jl_value_t *jl_lookup_code_address(void *ip, int skipC)
return rs;
}
-static void jl_safe_print_codeloc(const char* func_name, const char* file_name,
+static void jl_safe_print_codeloc(const char *pre_str,
+ const char* func_name, const char* file_name,
int line, int inlined) JL_NOTSAFEPOINT
{
const char *inlined_str = inlined ? " [inlined]" : "";
if (line != -1) {
- jl_safe_printf("%s at %s:%d%s\n", func_name, file_name, line, inlined_str);
+ jl_safe_printf("%s%s at %s:%d%s\n",
+ pre_str, func_name, file_name, line, inlined_str);
}
else {
- jl_safe_printf("%s at %s (unknown line)%s\n", func_name, file_name, inlined_str);
+ jl_safe_printf("%s%s at %s (unknown line)%s\n",
+ pre_str, func_name, file_name, inlined_str);
}
}
// Print function, file and line containing native instruction pointer `ip` by
// looking up debug info. Prints multiple such frames when `ip` points to
// inlined code.
-void jl_print_native_codeloc(uintptr_t ip) JL_NOTSAFEPOINT
+void jl_print_native_codeloc(char *pre_str, uintptr_t ip) JL_NOTSAFEPOINT
{
// This function is not allowed to reference any TLS variables since
// it can be called from an unmanaged thread on OSX.
@@ -660,10 +667,11 @@ void jl_print_native_codeloc(uintptr_t ip) JL_NOTSAFEPOINT
for (i = 0; i < n; i++) {
jl_frame_t frame = frames[i];
if (!frame.func_name) {
- jl_safe_printf("unknown function (ip: %p) at %s\n", (void*)ip, frame.file_name ? frame.file_name : "(unknown file)");
+ jl_safe_printf("%sunknown function (ip: %p) at %s\n", pre_str, (void*)ip, frame.file_name ? frame.file_name : "(unknown file)");
}
else {
- jl_safe_print_codeloc(frame.func_name, frame.file_name, frame.line, frame.inlined);
+ jl_safe_print_codeloc(pre_str, frame.func_name,
+ frame.file_name, frame.line, frame.inlined);
free(frame.func_name);
}
free(frame.file_name);
@@ -721,7 +729,7 @@ const char *jl_debuginfo_name(jl_value_t *func)
// func == module : top-level
// func == NULL : macro expansion
-static void jl_print_debugloc(jl_debuginfo_t *debuginfo, jl_value_t *func, size_t ip, int inlined) JL_NOTSAFEPOINT
+static void jl_print_debugloc(const char *pre_str, jl_debuginfo_t *debuginfo, jl_value_t *func, size_t ip, int inlined) JL_NOTSAFEPOINT
{
if (!jl_is_symbol(debuginfo->def)) // this is a path or
func = debuginfo->def; // this is inlined code
@@ -730,26 +738,36 @@ static void jl_print_debugloc(jl_debuginfo_t *debuginfo, jl_value_t *func, size_
if (edges_idx) {
jl_debuginfo_t *edge = (jl_debuginfo_t*)jl_svecref(debuginfo->edges, edges_idx - 1);
assert(jl_typetagis(edge, jl_debuginfo_type));
- jl_print_debugloc(edge, NULL, stmt.pc, 1);
+ jl_print_debugloc(pre_str, edge, NULL, stmt.pc, 1);
}
intptr_t ip2 = stmt.line;
if (ip2 >= 0 && ip > 0 && (jl_value_t*)debuginfo->linetable != jl_nothing) {
- jl_print_debugloc(debuginfo->linetable, func, ip2, 0);
+ jl_print_debugloc(pre_str, debuginfo->linetable, func, ip2, 0);
}
else {
if (ip2 < 0) // set broken debug info to ignored
ip2 = 0;
const char *func_name = jl_debuginfo_name(func);
const char *file = jl_debuginfo_file(debuginfo);
- jl_safe_print_codeloc(func_name, file, ip2, inlined);
+ jl_safe_print_codeloc(pre_str, func_name, file, ip2, inlined);
}
}
// Print code location for backtrace buffer entry at *bt_entry
-void jl_print_bt_entry_codeloc(jl_bt_element_t *bt_entry) JL_NOTSAFEPOINT
+void jl_print_bt_entry_codeloc(int sig, jl_bt_element_t *bt_entry) JL_NOTSAFEPOINT
{
+ char sig_str[32], pre_str[64];
+ sig_str[0] = pre_str[0] = '\0';
+ if (sig != -1) {
+ snprintf(sig_str, 32, "signal (%d) ", sig);
+ }
+ // do not call jl_threadid if there's no current task
+ if (jl_get_current_task()) {
+ snprintf(pre_str, 64, "%sthread (%d) ", sig_str, jl_threadid() + 1);
+ }
+
if (jl_bt_is_native(bt_entry)) {
- jl_print_native_codeloc(bt_entry[0].uintptr);
+ jl_print_native_codeloc(pre_str, bt_entry[0].uintptr);
}
else if (jl_bt_entry_tag(bt_entry) == JL_BT_INTERP_FRAME_TAG) {
size_t ip = jl_bt_entry_header(bt_entry); // zero-indexed
@@ -768,7 +786,7 @@ void jl_print_bt_entry_codeloc(jl_bt_element_t *bt_entry) JL_NOTSAFEPOINT
if (jl_is_code_info(code)) {
jl_code_info_t *src = (jl_code_info_t*)code;
// See also the debug info handling in codegen.cpp.
- jl_print_debugloc(src->debuginfo, def, ip + 1, 0);
+ jl_print_debugloc(pre_str, src->debuginfo, def, ip + 1, 0);
}
else {
// If we're using this function something bad has already happened;
@@ -1357,7 +1375,13 @@ JL_DLLEXPORT jl_record_backtrace_result_t jl_record_backtrace(jl_task_t *t, jl_b
JL_DLLEXPORT void jl_gdblookup(void* ip)
{
- jl_print_native_codeloc((uintptr_t)ip);
+ char pre_str[64];
+ pre_str[0] = '\0';
+ // do not call jl_threadid if there's no current task
+ if (jl_get_current_task()) {
+ snprintf(pre_str, 64, "thread (%d) ", jl_threadid() + 1);
+ }
+ jl_print_native_codeloc(pre_str, (uintptr_t)ip);
}
// Print backtrace for current exception in catch block
@@ -1372,7 +1396,7 @@ JL_DLLEXPORT void jlbacktrace(void) JL_NOTSAFEPOINT
size_t i, bt_size = jl_excstack_bt_size(s, s->top);
jl_bt_element_t *bt_data = jl_excstack_bt_data(s, s->top);
for (i = 0; i < bt_size; i += jl_bt_entry_size(bt_data + i)) {
- jl_print_bt_entry_codeloc(bt_data + i);
+ jl_print_bt_entry_codeloc(-1, bt_data + i);
}
}
@@ -1387,7 +1411,7 @@ JL_DLLEXPORT void jlbacktracet(jl_task_t *t) JL_NOTSAFEPOINT
size_t bt_size = r.bt_size;
size_t i;
for (i = 0; i < bt_size; i += jl_bt_entry_size(bt_data + i)) {
- jl_print_bt_entry_codeloc(bt_data + i);
+ jl_print_bt_entry_codeloc(-1, bt_data + i);
}
if (bt_size == 0)
jl_safe_printf(" no backtrace recorded\n");
@@ -1398,11 +1422,30 @@ JL_DLLEXPORT void jl_print_backtrace(void) JL_NOTSAFEPOINT
jlbacktrace();
}
-// Print backtraces for all live tasks, for all threads, to jl_safe_printf stderr
+extern int jl_inside_heartbeat_thread(void);
+extern int jl_heartbeat_pause(void);
+extern int jl_heartbeat_resume(void);
+
+// Print backtraces for all live tasks, for all threads, to jl_safe_printf
+// stderr. This can take a _long_ time!
JL_DLLEXPORT void jl_print_task_backtraces(int show_done) JL_NOTSAFEPOINT
{
+ // disable heartbeats to prevent heartbeat loss while running this,
+ // unless this is called from the heartbeat thread itself; in that
+ // situation, the thread is busy running this and it will not be
+ // updating the missed heartbeats counter
+ if (!jl_inside_heartbeat_thread()) {
+ jl_heartbeat_pause();
+ }
+
size_t nthreads = jl_atomic_load_acquire(&jl_n_threads);
jl_ptls_t *allstates = jl_atomic_load_relaxed(&jl_all_tls_states);
+ int ctid = -1;
+ // do not call jl_threadid if there's no current task
+ if (jl_get_current_task()) {
+ ctid = jl_threadid() + 1;
+ }
+ jl_safe_printf("thread (%d) ++++ Task backtraces\n", ctid);
for (size_t i = 0; i < nthreads; i++) {
jl_ptls_t ptls2 = allstates[i];
if (gc_is_collector_thread(i)) {
@@ -1418,17 +1461,22 @@ JL_DLLEXPORT void jl_print_task_backtraces(int show_done) JL_NOTSAFEPOINT
jl_task_t *t = ptls2->root_task;
if (t != NULL)
t_state = jl_atomic_load_relaxed(&t->_state);
- jl_safe_printf("==== Thread %d created %zu live tasks\n",
- ptls2->tid + 1, n + (t_state != JL_TASK_STATE_DONE));
+ jl_safe_printf("thread (%d) ==== Thread %d created %zu live tasks\n",
+ ctid, ptls2->tid + 1, n + (t_state != JL_TASK_STATE_DONE));
if (show_done || t_state != JL_TASK_STATE_DONE) {
- jl_safe_printf(" ---- Root task (%p)\n", ptls2->root_task);
+ jl_safe_printf("thread (%d) ---- Root task (%p)\n", ctid, ptls2->root_task);
if (t != NULL) {
- jl_safe_printf(" (sticky: %d, started: %d, state: %d, tid: %d)\n",
- t->sticky, t->ctx.started, t_state,
+ jl_safe_printf("thread (%d) (sticky: %d, started: %d, state: %d, tid: %d)\n",
+ ctid, t->sticky, t->ctx.started, t_state,
jl_atomic_load_relaxed(&t->tid) + 1);
- jlbacktracet(t);
+ if (t->ctx.stkbuf != NULL) {
+ jlbacktracet(t);
+ }
+ else {
+ jl_safe_printf("thread (%d) no stack\n", ctid);
+ }
}
- jl_safe_printf(" ---- End root task\n");
+ jl_safe_printf("thread (%d) ---- End root task\n", ctid);
}
for (size_t j = 0; j < n; j++) {
@@ -1438,17 +1486,24 @@ JL_DLLEXPORT void jl_print_task_backtraces(int show_done) JL_NOTSAFEPOINT
int t_state = jl_atomic_load_relaxed(&t->_state);
if (!show_done && t_state == JL_TASK_STATE_DONE)
continue;
- jl_safe_printf(" ---- Task %zu (%p)\n", j + 1, t);
+ jl_safe_printf("thread (%d) ---- Task %zu (%p)\n", ctid, j + 1, t);
// n.b. this information might not be consistent with the stack printing after it, since it could start running or change tid, etc.
- jl_safe_printf(" (sticky: %d, started: %d, state: %d, tid: %d)\n",
- t->sticky, t->ctx.started, t_state,
+ jl_safe_printf("thread (%d) (sticky: %d, started: %d, state: %d, tid: %d)\n",
+ ctid, t->sticky, t->ctx.started, t_state,
jl_atomic_load_relaxed(&t->tid) + 1);
- jlbacktracet(t);
- jl_safe_printf(" ---- End task %zu\n", j + 1);
+ if (t->ctx.stkbuf != NULL)
+ jlbacktracet(t);
+ else
+ jl_safe_printf("thread (%d) no stack\n", ctid);
+ jl_safe_printf("thread (%d) ---- End task %zu\n", ctid, j + 1);
}
- jl_safe_printf("==== End thread %d\n", ptls2->tid + 1);
+ jl_safe_printf("thread (%d) ==== End thread %d\n", ctid, ptls2->tid + 1);
+ }
+ jl_safe_printf("thread (%d) ++++ Done\n", ctid);
+
+ if (!jl_inside_heartbeat_thread()) {
+ jl_heartbeat_resume();
}
- jl_safe_printf("==== Done\n");
}
#ifdef __cplusplus
diff --git a/src/staticdata.c b/src/staticdata.c
index cb1dc54d26d50..eac3bba62f01a 100644
--- a/src/staticdata.c
+++ b/src/staticdata.c
@@ -44,8 +44,6 @@
- step 3 combines the different sections (fields of `jl_serializer_state`) into one
- - step 4 writes the values of the hard-coded tagged items and `ccallable_list`
-
Much of the "real work" during deserialization is done by `get_item_for_reloc`. But a few items require specific
attention:
- uniquing: during deserialization, the target item (an "external" type or MethodInstance) must be checked against
@@ -90,6 +88,23 @@ External links:
static const size_t WORLD_AGE_REVALIDATION_SENTINEL = 0x1;
JL_DLLEXPORT size_t jl_require_world = ~(size_t)0;
+JL_DLLEXPORT _Atomic(size_t) jl_first_image_replacement_world = ~(size_t)0;
+
+// This structure is used to store hash tables for the memoization
+// of queries in staticdata.c (currently only `type_in_worklist`).
+typedef struct {
+ htable_t type_in_worklist;
+} jl_query_cache;
+
+static void init_query_cache(jl_query_cache *cache)
+{
+ htable_new(&cache->type_in_worklist, 0);
+}
+
+static void destroy_query_cache(jl_query_cache *cache)
+{
+ htable_free(&cache->type_in_worklist);
+}
#include "staticdata_utils.c"
#include "precompile_utils.c"
@@ -101,7 +116,7 @@ extern "C" {
// TODO: put WeakRefs on the weak_refs list during deserialization
// TODO: handle finalizers
-#define NUM_TAGS 196
+#define NUM_TAGS 197
// An array of references that need to be restored from the sysimg
// This is a manually constructed dual of the gvars array, which would be produced by codegen for Julia code, for C.
@@ -249,6 +264,7 @@ jl_value_t **const*const get_tags(void) {
INSERT_TAG(jl_atomicerror_type);
INSERT_TAG(jl_missingcodeerror_type);
INSERT_TAG(jl_precompilable_error);
+ INSERT_TAG(jl_trimfailure_type);
// other special values
INSERT_TAG(jl_emptysvec);
@@ -506,8 +522,8 @@ static htable_t bits_replace;
// This is a manually constructed dual of the fvars array, which would be produced by codegen for Julia code, for C.
static const jl_fptr_args_t id_to_fptrs[] = {
&jl_f_throw, &jl_f_throw_methoderror, &jl_f_is, &jl_f_typeof, &jl_f_issubtype, &jl_f_isa,
- &jl_f_typeassert, &jl_f__apply_iterate, &jl_f__apply_pure,
- &jl_f__call_latest, &jl_f__call_in_world, &jl_f__call_in_world_total, &jl_f_isdefined, &jl_f_isdefinedglobal,
+ &jl_f_typeassert, &jl_f__apply_iterate,
+ &jl_f_invokelatest, &jl_f_invoke_in_world, &jl_f__call_in_world_total, &jl_f_isdefined, &jl_f_isdefinedglobal,
&jl_f_tuple, &jl_f_svec, &jl_f_intrinsic_call,
&jl_f_getfield, &jl_f_setfield, &jl_f_swapfield, &jl_f_modifyfield, &jl_f_setfieldonce,
&jl_f_replacefield, &jl_f_fieldtype, &jl_f_nfields, &jl_f_apply_type, &jl_f_memorynew,
@@ -515,8 +531,8 @@ static const jl_fptr_args_t id_to_fptrs[] = {
&jl_f_memoryrefset, &jl_f_memoryrefswap, &jl_f_memoryrefmodify, &jl_f_memoryrefreplace, &jl_f_memoryrefsetonce,
&jl_f_applicable, &jl_f_invoke, &jl_f_sizeof, &jl_f__expr, &jl_f__typevar,
&jl_f_ifelse, &jl_f__structtype, &jl_f__abstracttype, &jl_f__primitivetype,
- &jl_f__typebody, &jl_f__setsuper, &jl_f__equiv_typedef, &jl_f_get_binding_type,
- &jl_f_opaque_closure_call, &jl_f_donotdelete, &jl_f_compilerbarrier,
+ &jl_f__typebody, &jl_f__setsuper, &jl_f__equiv_typedef, &jl_f__defaultctors,
+ &jl_f_opaque_closure_call, &jl_f_donotdelete, &jl_f_compilerbarrier, &jl_f_get_binding_type,
&jl_f_getglobal, &jl_f_setglobal, &jl_f_swapglobal, &jl_f_modifyglobal, &jl_f_replaceglobal, &jl_f_setglobalonce,
&jl_f_finalizer, &jl_f__compute_sparams, &jl_f__svec_ref,
&jl_f_current_scope,
@@ -538,7 +554,6 @@ typedef struct {
arraylist_t uniquing_objs; // a list of locations that reference non-types that must be de-duplicated
arraylist_t fixup_types; // a list of locations of types requiring (re)caching
arraylist_t fixup_objs; // a list of locations of objects requiring (re)caching
- arraylist_t ccallable_list; // @ccallable entry points to install
// mapping from a buildid_idx to a depmods_idx
jl_array_t *buildid_depmods_idxs;
// record of build_ids for all external linkages, in order of serialization for the current sysimg/pkgimg
@@ -550,6 +565,10 @@ typedef struct {
jl_array_t *link_ids_gctags;
jl_array_t *link_ids_gvars;
jl_array_t *link_ids_external_fnvars;
+ jl_array_t *method_roots_list;
+ htable_t method_roots_index;
+ uint64_t worklist_key;
+ jl_query_cache *query_cache;
jl_ptls_t ptls;
jl_image_t *image;
int8_t incremental;
@@ -557,7 +576,6 @@ typedef struct {
static jl_value_t *jl_bigint_type = NULL;
static int gmp_limb_size = 0;
-static jl_sym_t *jl_docmeta_sym = NULL;
#ifdef _P64
#define RELOC_TAG_OFFSET 61
@@ -628,8 +646,7 @@ typedef struct {
} pkgcachesizes;
// --- Static Compile ---
-static void *jl_sysimg_handle = NULL;
-static jl_image_t sysimage;
+static jl_image_buf_t jl_sysimage_buf = { JL_IMAGE_KIND_NONE };
static inline uintptr_t *sysimg_gvars(const char *base, const int32_t *offsets, size_t idx)
{
@@ -641,29 +658,6 @@ JL_DLLEXPORT int jl_running_on_valgrind(void)
return RUNNING_ON_VALGRIND;
}
-void *system_image_data_unavailable;
-extern void * JL_WEAK_SYMBOL_OR_ALIAS_DEFAULT(system_image_data_unavailable) jl_system_image_data;
-extern void * JL_WEAK_SYMBOL_OR_ALIAS_DEFAULT(system_image_data_unavailable) jl_system_image_size;
-static void jl_load_sysimg_so(void)
-{
- const char *sysimg_data;
- assert(sysimage.fptrs.ptrs); // jl_init_processor_sysimg should already be run
- if (jl_sysimg_handle == jl_exe_handle &&
- &jl_system_image_data != JL_WEAK_SYMBOL_DEFAULT(system_image_data_unavailable))
- sysimg_data = (const char*)&jl_system_image_data;
- else
- jl_dlsym(jl_sysimg_handle, "jl_system_image_data", (void **)&sysimg_data, 1);
- size_t *plen;
- if (jl_sysimg_handle == jl_exe_handle &&
- &jl_system_image_size != JL_WEAK_SYMBOL_DEFAULT(system_image_data_unavailable))
- plen = (size_t *)&jl_system_image_size;
- else
- jl_dlsym(jl_sysimg_handle, "jl_system_image_size", (void **)&plen, 1);
- jl_gc_notify_image_load(sysimg_data, *plen);
- jl_restore_system_image_data(sysimg_data, *plen);
-}
-
-
// --- serializer ---
#define NBOX_C 1024
@@ -697,14 +691,13 @@ static int jl_needs_serialization(jl_serializer_state *s, jl_value_t *v) JL_NOTS
return 1;
}
-
-static int caching_tag(jl_value_t *v) JL_NOTSAFEPOINT
+static int caching_tag(jl_value_t *v, jl_query_cache *query_cache) JL_NOTSAFEPOINT
{
if (jl_is_method_instance(v)) {
jl_method_instance_t *mi = (jl_method_instance_t*)v;
jl_value_t *m = mi->def.value;
if (jl_is_method(m) && jl_object_in_image(m))
- return 1 + type_in_worklist(mi->specTypes);
+ return 1 + type_in_worklist(mi->specTypes, query_cache);
}
if (jl_is_binding(v)) {
jl_globalref_t *gr = ((jl_binding_t*)v)->globalref;
@@ -719,24 +712,24 @@ static int caching_tag(jl_value_t *v) JL_NOTSAFEPOINT
if (jl_is_tuple_type(dt) ? !dt->isconcretetype : dt->hasfreetypevars)
return 0; // aka !is_cacheable from jltypes.c
if (jl_object_in_image((jl_value_t*)dt->name))
- return 1 + type_in_worklist(v);
+ return 1 + type_in_worklist(v, query_cache);
}
jl_value_t *dtv = jl_typeof(v);
if (jl_is_datatype_singleton((jl_datatype_t*)dtv)) {
- return 1 - type_in_worklist(dtv); // these are already recached in the datatype in the image
+ return 1 - type_in_worklist(dtv, query_cache); // these are already recached in the datatype in the image
}
return 0;
}
-static int needs_recaching(jl_value_t *v) JL_NOTSAFEPOINT
+static int needs_recaching(jl_value_t *v, jl_query_cache *query_cache) JL_NOTSAFEPOINT
{
- return caching_tag(v) == 2;
+ return caching_tag(v, query_cache) == 2;
}
-static int needs_uniquing(jl_value_t *v) JL_NOTSAFEPOINT
+static int needs_uniquing(jl_value_t *v, jl_query_cache *query_cache) JL_NOTSAFEPOINT
{
assert(!jl_object_in_image(v));
- return caching_tag(v) == 1;
+ return caching_tag(v, query_cache) == 1;
}
static void record_field_change(jl_value_t **addr, jl_value_t *newval) JL_NOTSAFEPOINT
@@ -790,43 +783,46 @@ static void jl_queue_module_for_serialization(jl_serializer_state *s, jl_module_
{
jl_queue_for_serialization(s, m->name);
jl_queue_for_serialization(s, m->parent);
+ if (!jl_options.strip_metadata)
+ jl_queue_for_serialization(s, m->file);
+ jl_queue_for_serialization(s, jl_atomic_load_relaxed(&m->bindingkeyset));
if (jl_options.trim) {
jl_queue_for_serialization_(s, (jl_value_t*)jl_atomic_load_relaxed(&m->bindings), 0, 1);
- } else {
- jl_queue_for_serialization(s, jl_atomic_load_relaxed(&m->bindings));
- }
- jl_queue_for_serialization(s, jl_atomic_load_relaxed(&m->bindingkeyset));
- if (jl_options.strip_metadata || jl_options.trim) {
jl_svec_t *table = jl_atomic_load_relaxed(&m->bindings);
for (size_t i = 0; i < jl_svec_len(table); i++) {
jl_binding_t *b = (jl_binding_t*)jl_svecref(table, i);
if ((void*)b == jl_nothing)
break;
- if (jl_options.strip_metadata) {
- jl_sym_t *name = b->globalref->name;
- if (name == jl_docmeta_sym && jl_get_binding_value(b))
- record_field_change((jl_value_t**)&b->value, jl_nothing);
- }
- if (jl_options.trim) {
- jl_value_t *val = jl_get_binding_value(b);
- // keep binding objects that are defined and ...
- if (val &&
- // ... point to modules ...
- (jl_is_module(val) ||
- // ... or point to __init__ methods ...
- !strcmp(jl_symbol_name(b->globalref->name), "__init__") ||
- // ... or point to Base functions accessed by the runtime
- (m == jl_base_module && (!strcmp(jl_symbol_name(b->globalref->name), "wait") ||
- !strcmp(jl_symbol_name(b->globalref->name), "task_done_hook"))))) {
- jl_queue_for_serialization(s, b);
- }
+ jl_value_t *val = jl_get_binding_value_in_world(b, jl_atomic_load_relaxed(&jl_world_counter));
+ // keep binding objects that are defined in the latest world and ...
+ if (val &&
+ // ... point to modules ...
+ (jl_is_module(val) ||
+ // ... or point to __init__ methods ...
+ !strcmp(jl_symbol_name(b->globalref->name), "__init__") ||
+ // ... or point to Base functions accessed by the runtime
+ (m == jl_base_module && (!strcmp(jl_symbol_name(b->globalref->name), "wait") ||
+ !strcmp(jl_symbol_name(b->globalref->name), "task_done_hook"))))) {
+ jl_queue_for_serialization(s, b);
}
}
}
+ else {
+ jl_queue_for_serialization(s, jl_atomic_load_relaxed(&m->bindings));
+ }
for (size_t i = 0; i < module_usings_length(m); i++) {
jl_queue_for_serialization(s, module_usings_getmod(m, i));
}
+
+ if (jl_options.trim || jl_options.strip_ir) {
+ record_field_change((jl_value_t**)&m->usings_backedges, jl_nothing);
+ record_field_change((jl_value_t**)&m->scanned_methods, jl_nothing);
+ }
+ else {
+ jl_queue_for_serialization(s, m->usings_backedges);
+ jl_queue_for_serialization(s, m->scanned_methods);
+ }
}
// Anything that requires uniquing or fixing during deserialization needs to be "toplevel"
@@ -850,7 +846,7 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_
jl_datatype_t *dt = (jl_datatype_t*)v;
// ensure all type parameters are recached
jl_queue_for_serialization_(s, (jl_value_t*)dt->parameters, 1, 1);
- if (jl_is_datatype_singleton(dt) && needs_uniquing(dt->instance)) {
+ if (jl_is_datatype_singleton(dt) && needs_uniquing(dt->instance, s->query_cache)) {
assert(jl_needs_serialization(s, dt->instance)); // should be true, since we visited dt
// do not visit dt->instance for our template object as it leads to unwanted cycles here
// (it may get serialized from elsewhere though)
@@ -858,40 +854,51 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_
}
goto done_fields; // for now
}
- if (s->incremental && jl_is_method_instance(v)) {
+ if (jl_is_method_instance(v)) {
jl_method_instance_t *mi = (jl_method_instance_t*)v;
- jl_value_t *def = mi->def.value;
- if (needs_uniquing(v)) {
- // we only need 3 specific fields of this (the rest are not used)
- jl_queue_for_serialization(s, mi->def.value);
- jl_queue_for_serialization(s, mi->specTypes);
- jl_queue_for_serialization(s, (jl_value_t*)mi->sparam_vals);
- goto done_fields;
- }
- else if (jl_is_method(def) && jl_object_in_image(def)) {
- // we only need 3 specific fields of this (the rest are restored afterward, if valid)
- // in particular, cache is repopulated by jl_mi_cache_insert for all foreign function,
- // so must not be present here
- record_field_change((jl_value_t**)&mi->backedges, NULL);
- record_field_change((jl_value_t**)&mi->cache, NULL);
+ if (s->incremental) {
+ jl_value_t *def = mi->def.value;
+ if (needs_uniquing(v, s->query_cache)) {
+ // we only need 3 specific fields of this (the rest are not used)
+ jl_queue_for_serialization(s, mi->def.value);
+ jl_queue_for_serialization(s, mi->specTypes);
+ jl_queue_for_serialization(s, (jl_value_t*)mi->sparam_vals);
+ goto done_fields;
+ }
+ else if (jl_is_method(def) && jl_object_in_image(def)) {
+ // we only need 3 specific fields of this (the rest are restored afterward, if valid)
+ // in particular, cache is repopulated by jl_mi_cache_insert for all foreign function,
+ // so must not be present here
+ record_field_change((jl_value_t**)&mi->backedges, NULL);
+ record_field_change((jl_value_t**)&mi->cache, NULL);
+ }
+ else {
+ assert(!needs_recaching(v, s->query_cache));
+ }
+ // n.b. opaque closures cannot be inspected and relied upon like a
+ // normal method since they can get improperly introduced by generated
+ // functions, so if they appeared at all, we will probably serialize
+ // them wrong and segfault. The jl_code_for_staged function should
+ // prevent this from happening, so we do not need to detect that user
+ // error now.
}
- else {
- assert(!needs_recaching(v));
+ }
+ if (jl_is_mtable(v)) {
+ jl_methtable_t *mt = (jl_methtable_t*)v;
+ if (jl_options.trim || jl_options.strip_ir) {
+ record_field_change((jl_value_t**)&mt->backedges, NULL);
}
- // n.b. opaque closures cannot be inspected and relied upon like a
- // normal method since they can get improperly introduced by generated
- // functions, so if they appeared at all, we will probably serialize
- // them wrong and segfault. The jl_code_for_staged function should
- // prevent this from happening, so we do not need to detect that user
- // error now.
- }
- if (s->incremental && jl_is_binding(v)) {
- if (needs_uniquing(v)) {
- jl_binding_t *b = (jl_binding_t*)v;
+ }
+ if (jl_is_binding(v)) {
+ jl_binding_t *b = (jl_binding_t*)v;
+ if (s->incremental && needs_uniquing(v, s->query_cache)) {
jl_queue_for_serialization(s, b->globalref->mod);
jl_queue_for_serialization(s, b->globalref->name);
goto done_fields;
}
+ if (jl_options.trim || jl_options.strip_ir) {
+ record_field_change((jl_value_t**)&b->backedges, NULL);
+ }
}
if (s->incremental && jl_is_globalref(v)) {
jl_globalref_t *gr = (jl_globalref_t*)v;
@@ -929,22 +936,57 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_
int is_relocatable = jl_is_code_info(inferred) ||
(jl_is_string(inferred) && jl_string_len(inferred) > 0 && jl_string_data(inferred)[jl_string_len(inferred) - 1]);
if (!is_relocatable) {
- record_field_change((jl_value_t**)&ci->inferred, jl_nothing);
+ inferred = jl_nothing;
}
else if (def->source == NULL) {
// don't delete code from optimized opaque closures that can't be reconstructed (and builtins)
}
else if (jl_atomic_load_relaxed(&ci->max_world) != ~(size_t)0 || // delete all code that cannot run
jl_atomic_load_relaxed(&ci->invoke) == jl_fptr_const_return) { // delete all code that just returns a constant
- record_field_change((jl_value_t**)&ci->inferred, jl_nothing);
+ inferred = jl_nothing;
}
else if (native_functions && // don't delete any code if making a ji file
(ci->owner == jl_nothing) && // don't delete code for external interpreters
!effects_foldable(jl_atomic_load_relaxed(&ci->ipo_purity_bits)) && // don't delete code we may want for irinterp
jl_ir_inlining_cost(inferred) == UINT16_MAX) { // don't delete inlineable code
// delete the code now: if we thought it was worth keeping, it would have been converted to object code
+ inferred = jl_nothing;
+ }
+ if (inferred == jl_nothing) {
record_field_change((jl_value_t**)&ci->inferred, jl_nothing);
}
+ else if (jl_is_string(inferred)) {
+ // New roots for external methods
+ if (jl_object_in_image((jl_value_t*)def)) {
+ void **pfound = ptrhash_bp(&s->method_roots_index, def);
+ if (*pfound == HT_NOTFOUND) {
+ *pfound = def;
+ size_t nwithkey = nroots_with_key(def, s->worklist_key);
+ if (nwithkey) {
+ jl_array_ptr_1d_push(s->method_roots_list, (jl_value_t*)def);
+ jl_array_t *newroots = jl_alloc_vec_any(nwithkey);
+ jl_array_ptr_1d_push(s->method_roots_list, (jl_value_t*)newroots);
+ rle_iter_state rootiter = rle_iter_init(0);
+ uint64_t *rletable = NULL;
+ size_t nblocks2 = 0;
+ size_t nroots = jl_array_nrows(def->roots);
+ size_t k = 0;
+ if (def->root_blocks) {
+ rletable = jl_array_data(def->root_blocks, uint64_t);
+ nblocks2 = jl_array_nrows(def->root_blocks);
+ }
+ while (rle_iter_increment(&rootiter, nroots, rletable, nblocks2)) {
+ if (rootiter.key == s->worklist_key) {
+ jl_value_t *newroot = jl_array_ptr_ref(def->roots, rootiter.i);
+ jl_queue_for_serialization(s, newroot);
+ jl_array_ptr_set(newroots, k++, newroot);
+ }
+ }
+ assert(k == nwithkey);
+ }
+ }
+ }
+ }
}
}
}
@@ -997,11 +1039,6 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_
else if (jl_typetagis(v, jl_module_tag << 4)) {
jl_queue_module_for_serialization(s, (jl_module_t*)v);
}
- else if (jl_is_binding_partition(v)) {
- jl_binding_partition_t *bpart = (jl_binding_partition_t*)v;
- jl_queue_for_serialization_(s, decode_restriction_value(jl_atomic_load_relaxed(&bpart->restriction)), 1, immediate);
- jl_queue_for_serialization_(s, get_replaceable_field((jl_value_t**)&bpart->next, 0), 1, immediate);
- }
else if (layout->nfields > 0) {
if (jl_options.trim) {
if (jl_is_method(v)) {
@@ -1019,12 +1056,23 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_
record_field_change((jl_value_t **)&tn->mt, NULL);
}
}
+ // TODO: prune any partitions and partition data that has been deleted in the current world
+ //else if (jl_is_binding(v)) {
+ // jl_binding_t *b = (jl_binding_t*)v;
+ //}
+ //else if (jl_is_binding_partition(v)) {
+ // jl_binding_partition_t *bpart = (jl_binding_partition_t*)v;
+ //}
}
char *data = (char*)jl_data_ptr(v);
size_t i, np = layout->npointers;
+ size_t fldidx = 1;
for (i = 0; i < np; i++) {
uint32_t ptr = jl_ptr_offset(t, i);
- int mutabl = t->name->mutabl;
+ size_t offset = jl_ptr_offset(t, i) * sizeof(jl_value_t*);
+ while (offset >= (fldidx == layout->nfields ? jl_datatype_size(t) : jl_field_offset(t, fldidx)))
+ fldidx++;
+ int mutabl = !jl_field_isconst(t, fldidx - 1);
jl_value_t *fld = get_replaceable_field(&((jl_value_t**)data)[ptr], mutabl);
jl_queue_for_serialization_(s, fld, 1, immediate);
}
@@ -1075,9 +1123,9 @@ static void jl_queue_for_serialization_(jl_serializer_state *s, jl_value_t *v, i
// Items that require postorder traversal must visit their children prior to insertion into
// the worklist/serialization_order (and also before their first use)
if (s->incremental && !immediate) {
- if (jl_is_datatype(t) && needs_uniquing(v))
+ if (jl_is_datatype(t) && needs_uniquing(v, s->query_cache))
immediate = 1;
- if (jl_is_datatype_singleton((jl_datatype_t*)t) && needs_uniquing(v))
+ if (jl_is_datatype_singleton((jl_datatype_t*)t) && needs_uniquing(v, s->query_cache))
immediate = 1;
}
@@ -1240,7 +1288,7 @@ static uintptr_t _backref_id(jl_serializer_state *s, jl_value_t *v, jl_array_t *
static void record_uniquing(jl_serializer_state *s, jl_value_t *fld, uintptr_t offset) JL_NOTSAFEPOINT
{
- if (s->incremental && jl_needs_serialization(s, fld) && needs_uniquing(fld)) {
+ if (s->incremental && jl_needs_serialization(s, fld) && needs_uniquing(fld, s->query_cache)) {
if (jl_is_datatype(fld) || jl_is_datatype_singleton((jl_datatype_t*)jl_typeof(fld)))
arraylist_push(&s->uniquing_types, (void*)(uintptr_t)offset);
else if (jl_is_method_instance(fld) || jl_is_binding(fld))
@@ -1298,11 +1346,24 @@ static void jl_write_module(jl_serializer_state *s, uintptr_t item, jl_module_t
arraylist_push(&s->relocs_list, (void*)backref_id(s, jl_atomic_load_relaxed(&m->bindingkeyset), s->link_ids_relocs));
newm->file = NULL;
arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, file)));
- arraylist_push(&s->relocs_list, (void*)backref_id(s, m->file, s->link_ids_relocs));
+ arraylist_push(&s->relocs_list, (void*)backref_id(s, jl_options.strip_metadata ? jl_empty_sym : m->file , s->link_ids_relocs));
+ if (jl_options.strip_metadata)
+ newm->line = 0;
+ newm->usings_backedges = NULL;
+ arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, usings_backedges)));
+ arraylist_push(&s->relocs_list, (void*)backref_id(s, get_replaceable_field(&m->usings_backedges, 1), s->link_ids_relocs));
+ newm->scanned_methods = NULL;
+ arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, scanned_methods)));
+ arraylist_push(&s->relocs_list, (void*)backref_id(s, get_replaceable_field(&m->scanned_methods, 1), s->link_ids_relocs));
+
+ // After reload, everything that has happened in this process happened semantically at
+ // (for .incremental) or before jl_require_world, so reset this flag.
+ jl_atomic_store_relaxed(&newm->export_set_changed_since_require_world, 0);
// write out the usings list
memset(&newm->usings._space, 0, sizeof(newm->usings._space));
if (m->usings.items == &m->usings._space[0]) {
+ newm->usings.items = &newm->usings._space[0];
// Push these relocations here, to keep them in order. This pairs with the `newm->usings.items = ` below.
arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, usings.items)));
arraylist_push(&s->relocs_list, (void*)(((uintptr_t)DataRef << RELOC_TAG_OFFSET) + item));
@@ -1314,9 +1375,9 @@ static void jl_write_module(jl_serializer_state *s, uintptr_t item, jl_module_t
newm_data->min_world = data->min_world;
newm_data->max_world = data->max_world;
if (s->incremental) {
- if (data->max_world != (size_t)-1)
+ if (data->max_world != ~(size_t)0)
newm_data->max_world = 0;
- newm_data->min_world = 0;
+ newm_data->min_world = jl_require_world;
}
arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, usings._space[3*i])));
arraylist_push(&s->relocs_list, (void*)backref_id(s, data->mod, s->link_ids_relocs));
@@ -1327,12 +1388,19 @@ static void jl_write_module(jl_serializer_state *s, uintptr_t item, jl_module_t
newm->usings.items = (void**)tot;
arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, usings.items)));
arraylist_push(&s->relocs_list, (void*)(((uintptr_t)DataRef << RELOC_TAG_OFFSET) + item));
+ newm = NULL; // `write_*(s->s)` below may invalidate `newm`, so defensively set it to NULL
size_t i;
for (i = 0; i < module_usings_length(m); i++) {
struct _jl_module_using *data = module_usings_getidx(m, i);
write_pointerfield(s, (jl_value_t*)data->mod);
- write_uint(s->s, data->min_world);
- write_uint(s->s, data->max_world);
+ if (s->incremental) {
+ // TODO: Drop dead ones entirely?
+ write_uint(s->s, jl_require_world);
+ write_uint(s->s, data->max_world == ~(size_t)0 ? ~(size_t)0 : 1);
+ } else {
+ write_uint(s->s, data->min_world);
+ write_uint(s->s, data->max_world);
+ }
static_assert(sizeof(struct _jl_module_using) == 3*sizeof(void*), "_jl_module_using mismatch");
tot += sizeof(struct _jl_module_using);
}
@@ -1444,7 +1512,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
// write header
if (object_id_expected)
write_uint(f, jl_object_id(v));
- if (s->incremental && jl_needs_serialization(s, (jl_value_t*)t) && needs_uniquing((jl_value_t*)t))
+ if (s->incremental && jl_needs_serialization(s, (jl_value_t*)t) && needs_uniquing((jl_value_t*)t, s->query_cache))
arraylist_push(&s->uniquing_types, (void*)(uintptr_t)(ios_pos(f)|1));
if (f == s->const_data)
write_uint(s->const_data, ((uintptr_t)t->smalltag << 4) | GC_OLD_MARKED | GC_IN_IMAGE);
@@ -1455,8 +1523,8 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
layout_table.items[item] = (void*)(reloc_offset | (f == s->const_data)); // store the inverse mapping of `serialization_order` (`id` => object-as-streampos)
if (s->incremental) {
- if (needs_uniquing(v)) {
- if (jl_typetagis(v, jl_binding_type)) {
+ if (needs_uniquing(v, s->query_cache)) {
+ if (jl_is_binding(v)) {
jl_binding_t *b = (jl_binding_t*)v;
if (b->globalref == NULL)
jl_error("Binding cannot be serialized"); // no way (currently) to recover its identity
@@ -1484,7 +1552,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
assert(jl_is_datatype_singleton(t) && "unreachable");
}
}
- else if (needs_recaching(v)) {
+ else if (needs_recaching(v, s->query_cache)) {
arraylist_push(jl_is_datatype(v) ? &s->fixup_types : &s->fixup_objs, (void*)reloc_offset);
}
}
@@ -1666,44 +1734,6 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
ios_write(s->const_data, (char*)pdata, nb);
write_pointer(f);
}
- else if (jl_is_binding_partition(v)) {
- jl_binding_partition_t *bpart = (jl_binding_partition_t*)v;
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- jl_value_t *restriction_val = decode_restriction_value(pku);
- static_assert(offsetof(jl_binding_partition_t, restriction) == 0, "BindingPartition layout mismatch");
- write_pointerfield(s, restriction_val);
-#ifndef _P64
- write_uint(f, decode_restriction_kind(pku));
-#endif
- size_t max_world = jl_atomic_load_relaxed(&bpart->max_world);
- if (s->incremental) {
- if (max_world == ~(size_t)0) {
- // Still valid. Will be considered to be defined in jl_require_world
- // after reload, which is the first world before new code runs.
- // We use this as a quick check to determine whether a binding was
- // invalidated. If a binding was first defined in or before
- // jl_require_world, then we can assume that all precompile processes
- // will have seen it consistently.
- write_uint(f, jl_require_world);
- write_uint(f, max_world);
- } else {
- // The world will not be reachable after loading
- write_uint(f, 1);
- write_uint(f, 0);
- }
- } else {
- write_uint(f, bpart->min_world);
- write_uint(f, max_world);
- }
- write_pointerfield(s, (jl_value_t*)jl_atomic_load_relaxed(&bpart->next));
-#ifdef _P64
- write_uint(f, decode_restriction_kind(pku)); // This will be moved back into place during deserialization (if necessary)
- static_assert(sizeof(jl_binding_partition_t) == 5*sizeof(void*), "BindingPartition layout mismatch");
-#else
- write_uint(f, 0);
- static_assert(sizeof(jl_binding_partition_t) == 6*sizeof(void*), "BindingPartition layout mismatch");
-#endif
- }
else {
// Generic object::DataType serialization by field
const char *data = (const char*)v;
@@ -1716,7 +1746,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
tot = offset;
size_t fsz = jl_field_size(t, i);
jl_value_t *replace = (jl_value_t*)ptrhash_get(&bits_replace, (void*)slot);
- if (replace != HT_NOTFOUND) {
+ if (replace != HT_NOTFOUND && fsz > 0) {
assert(t->name->mutabl && !jl_field_isptr(t, i));
jl_value_t *rty = jl_typeof(replace);
size_t sz = jl_datatype_size(rty);
@@ -1743,9 +1773,12 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
}
size_t np = t->layout->npointers;
+ size_t fldidx = 1;
for (i = 0; i < np; i++) {
size_t offset = jl_ptr_offset(t, i) * sizeof(jl_value_t*);
- int mutabl = t->name->mutabl;
+ while (offset >= (fldidx == nf ? jl_datatype_size(t) : jl_field_offset(t, fldidx)))
+ fldidx++;
+ int mutabl = !jl_field_isconst(t, fldidx - 1);
jl_value_t *fld = get_replaceable_field((jl_value_t**)&data[offset], mutabl);
size_t fld_pos = offset + reloc_offset;
if (fld != NULL) {
@@ -1765,15 +1798,33 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
jl_typemap_entry_t *newentry = (jl_typemap_entry_t*)&s->s->buf[reloc_offset];
if (jl_atomic_load_relaxed(&newentry->max_world) == ~(size_t)0) {
if (jl_atomic_load_relaxed(&newentry->min_world) > 1) {
- jl_atomic_store_release(&newentry->min_world, ~(size_t)0);
- jl_atomic_store_release(&newentry->max_world, WORLD_AGE_REVALIDATION_SENTINEL);
+ jl_atomic_store_relaxed(&newentry->min_world, ~(size_t)0);
+ jl_atomic_store_relaxed(&newentry->max_world, WORLD_AGE_REVALIDATION_SENTINEL);
arraylist_push(&s->fixup_objs, (void*)reloc_offset);
}
}
else {
// garbage newentry - delete it :(
- jl_atomic_store_release(&newentry->min_world, 1);
- jl_atomic_store_release(&newentry->max_world, 0);
+ jl_atomic_store_relaxed(&newentry->min_world, 1);
+ jl_atomic_store_relaxed(&newentry->max_world, 0);
+ }
+ }
+ else if (s->incremental && jl_is_binding_partition(v)) {
+ jl_binding_partition_t *newbpart = (jl_binding_partition_t*)&s->s->buf[reloc_offset];
+ size_t max_world = jl_atomic_load_relaxed(&newbpart->max_world);
+ if (max_world == ~(size_t)0) {
+ // Still valid. Will be considered to be defined in jl_require_world
+ // after reload, which is the first world before new code runs.
+ // We use this as a quick check to determine whether a binding was
+ // invalidated. If a binding was first defined in or before
+ // jl_require_world, then we can assume that all precompile processes
+ // will have seen it consistently.
+ jl_atomic_store_relaxed(&newbpart->min_world, jl_require_world);
+ }
+ else {
+ // The world will not be reachable after loading
+ jl_atomic_store_relaxed(&newbpart->min_world, 1);
+ jl_atomic_store_relaxed(&newbpart->max_world, 0);
}
}
else if (jl_is_method(v)) {
@@ -1782,23 +1833,16 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
jl_method_t *m = (jl_method_t*)v;
jl_method_t *newm = (jl_method_t*)&f->buf[reloc_offset];
if (s->incremental) {
- if (jl_atomic_load_relaxed(&newm->deleted_world) == ~(size_t)0) {
- if (jl_atomic_load_relaxed(&newm->primary_world) > 1) {
- jl_atomic_store_relaxed(&newm->primary_world, ~(size_t)0); // min-world
- jl_atomic_store_relaxed(&newm->deleted_world, 1); // max_world
- arraylist_push(&s->fixup_objs, (void*)reloc_offset);
- }
- }
- else {
- jl_atomic_store_relaxed(&newm->primary_world, 1);
- jl_atomic_store_relaxed(&newm->deleted_world, 0);
+ if (jl_atomic_load_relaxed(&newm->primary_world) > 1) {
+ jl_atomic_store_relaxed(&newm->primary_world, ~(size_t)0); // min-world
+ int dispatch_status = jl_atomic_load_relaxed(&newm->dispatch_status);
+ jl_atomic_store_relaxed(&newm->dispatch_status, dispatch_status & METHOD_SIG_LATEST_ONLY ? 0 : METHOD_SIG_PRECOMPILE_MANY);
+ arraylist_push(&s->fixup_objs, (void*)reloc_offset);
}
}
else {
newm->nroots_sysimg = m->roots ? jl_array_len(m->roots) : 0;
}
- if (m->ccallable)
- arraylist_push(&s->ccallable_list, (void*)reloc_offset);
}
else if (jl_is_method_instance(v)) {
assert(f == s->s);
@@ -1827,6 +1871,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
jl_atomic_store_release(&newci->max_world, 0);
}
}
+ jl_atomic_store_relaxed(&newci->time_compile, 0.0);
jl_atomic_store_relaxed(&newci->invoke, NULL);
jl_atomic_store_relaxed(&newci->specsigflags, 0);
jl_atomic_store_relaxed(&newci->specptr.fptr, NULL);
@@ -1859,6 +1904,9 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
else if (invokeptr_id == -4) {
fptr_id = JL_API_OC_CALL;
}
+ else if (invokeptr_id == -5) {
+ abort();
+ }
else {
assert(invokeptr_id > 0);
ios_ensureroom(s->fptr_record, invokeptr_id * sizeof(void*));
@@ -1927,7 +1975,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED
}
}
void *superidx = ptrhash_get(&serialization_order, dt->super);
- if (s->incremental && superidx != HT_NOTFOUND && from_seroder_entry(superidx) > item && needs_uniquing((jl_value_t*)dt->super))
+ if (s->incremental && superidx != HT_NOTFOUND && from_seroder_entry(superidx) > item && needs_uniquing((jl_value_t*)dt->super, s->query_cache))
arraylist_push(&s->uniquing_super, dt->super);
}
else if (jl_is_typename(v)) {
@@ -2475,29 +2523,6 @@ static void jl_root_new_gvars(jl_serializer_state *s, jl_image_t *image, uint32_
}
}
-
-static void jl_compile_extern(jl_method_t *m, void *sysimg_handle) JL_GC_DISABLED
-{
- // install ccallable entry point in JIT
- assert(m); // makes clang-sa happy
- jl_svec_t *sv = m->ccallable;
- int success = jl_compile_extern_c(NULL, NULL, sysimg_handle, jl_svecref(sv, 0), jl_svecref(sv, 1));
- if (!success)
- jl_safe_printf("WARNING: @ccallable was already defined for this method name\n"); // enjoy a very bad time
- assert(success || !sysimg_handle);
-}
-
-
-static void jl_reinit_ccallable(arraylist_t *ccallable_list, char *base, void *sysimg_handle)
-{
- for (size_t i = 0; i < ccallable_list->len; i++) {
- uintptr_t item = (uintptr_t)ccallable_list->items[i];
- jl_method_t *m = (jl_method_t*)(base + item);
- jl_compile_extern(m, sysimg_handle);
- }
-}
-
-
// Code below helps slim down the images by
// removing cached types not referenced in the stream
static jl_svec_t *jl_prune_type_cache_hash(jl_svec_t *cache) JL_GC_DISABLED
@@ -2542,11 +2567,12 @@ static void jl_prune_type_cache_linear(jl_svec_t *cache)
jl_svecset(cache, ins++, jl_nothing);
}
+
uint_t bindingkey_hash(size_t idx, jl_value_t *data);
static void jl_prune_module_bindings(jl_module_t * m) JL_GC_DISABLED
{
- jl_svec_t * bindings = jl_atomic_load_relaxed(&m->bindings);
+ jl_svec_t *bindings = jl_atomic_load_relaxed(&m->bindings);
size_t l = jl_svec_len(bindings), i;
arraylist_t bindings_list;
arraylist_new(&bindings_list, 0);
@@ -2557,15 +2583,12 @@ static void jl_prune_module_bindings(jl_module_t * m) JL_GC_DISABLED
if (ti == jl_nothing)
continue;
jl_binding_t *ref = ((jl_binding_t*)ti);
- if (!((ptrhash_get(&serialization_order, ref) == HT_NOTFOUND) &&
- (ptrhash_get(&serialization_order, ref->globalref) == HT_NOTFOUND))) {
- jl_svecset(bindings, i, jl_nothing);
+ if (ptrhash_get(&serialization_order, ref) != HT_NOTFOUND)
arraylist_push(&bindings_list, ref);
- }
}
- jl_genericmemory_t* bindingkeyset = jl_atomic_load_relaxed(&m->bindingkeyset);
+ jl_genericmemory_t *bindingkeyset = jl_atomic_load_relaxed(&m->bindingkeyset);
_Atomic(jl_genericmemory_t*)bindingkeyset2;
- jl_atomic_store_relaxed(&bindingkeyset2,(jl_genericmemory_t*)jl_an_empty_memory_any);
+ jl_atomic_store_relaxed(&bindingkeyset2, (jl_genericmemory_t*)jl_an_empty_memory_any);
jl_svec_t *bindings2 = jl_alloc_svec_uninit(bindings_list.len);
for (i = 0; i < bindings_list.len; i++) {
jl_binding_t *ref = (jl_binding_t*)bindings_list.items[i];
@@ -2646,7 +2669,7 @@ static void strip_specializations_(jl_method_instance_t *mi)
record_field_change((jl_value_t**)&codeinst->debuginfo, (jl_value_t*)jl_nulldebuginfo);
codeinst = jl_atomic_load_relaxed(&codeinst->next);
}
- if (jl_options.strip_ir) {
+ if (jl_options.trim || jl_options.strip_ir) {
record_field_change((jl_value_t**)&mi->backedges, NULL);
}
}
@@ -2719,8 +2742,6 @@ static int strip_all_codeinfos__(jl_typemap_entry_t *def, void *_env)
static int strip_all_codeinfos_(jl_methtable_t *mt, void *_env)
{
- if (jl_options.strip_ir && mt->backedges)
- record_field_change((jl_value_t**)&mt->backedges, NULL);
return jl_typemap_visitor(jl_atomic_load_relaxed(&mt->defs), strip_all_codeinfos__, NULL);
}
@@ -2729,6 +2750,63 @@ static void jl_strip_all_codeinfos(void)
jl_foreach_reachable_mtable(strip_all_codeinfos_, NULL);
}
+static int strip_module(jl_module_t *m, jl_sym_t *docmeta_sym)
+{
+ size_t world = jl_atomic_load_relaxed(&jl_world_counter);
+ jl_svec_t *table = jl_atomic_load_relaxed(&m->bindings);
+ for (size_t i = 0; i < jl_svec_len(table); i++) {
+ jl_binding_t *b = (jl_binding_t*)jl_svecref(table, i);
+ if ((void*)b == jl_nothing)
+ break;
+ jl_sym_t *name = b->globalref->name;
+ jl_value_t *v = jl_get_binding_value_in_world(b, world);
+ if (v) {
+ if (jl_is_module(v)) {
+ jl_module_t *child = (jl_module_t*)v;
+ if (child != m && child->parent == m && child->name == name) {
+ // this is the original/primary binding for the submodule
+ if (!strip_module(child, docmeta_sym))
+ return 0;
+ }
+ }
+ }
+ if (name == docmeta_sym) {
+ if (jl_atomic_load_relaxed(&b->value))
+ record_field_change((jl_value_t**)&b->value, jl_nothing);
+ // TODO: this is a pretty stupidly unsound way to do this, but it is way to late here to do this correctly (by calling delete_binding and getting an updated world age then dropping all partitions from older worlds)
+ jl_binding_partition_t *bp = jl_atomic_load_relaxed(&b->partitions);
+ while (bp) {
+ if (jl_bkind_is_defined_constant(jl_binding_kind(bp))) {
+ // XXX: bp->kind = PARTITION_KIND_UNDEF_CONST;
+ record_field_change((jl_value_t**)&bp->restriction, NULL);
+ }
+ bp = jl_atomic_load_relaxed(&bp->next);
+ }
+ }
+ }
+ return 1;
+}
+
+
+static void jl_strip_all_docmeta(jl_array_t *mod_array)
+{
+ jl_sym_t *docmeta_sym = NULL;
+ if (jl_base_module) {
+ jl_value_t *docs = jl_get_global(jl_base_module, jl_symbol("Docs"));
+ if (docs && jl_is_module(docs)) {
+ docmeta_sym = (jl_sym_t*)jl_get_global((jl_module_t*)docs, jl_symbol("META"));
+ }
+ }
+ if (!docmeta_sym)
+ return;
+ for (size_t i = 0; i < jl_array_nrows(mod_array); i++) {
+ jl_module_t *m = (jl_module_t*)jl_array_ptr_ref(mod_array, i);
+ assert(jl_is_module(m));
+ if (m->parent == m) // some toplevel modules (really just Base) aren't actually
+ strip_module(m, docmeta_sym);
+ }
+}
+
// --- entry points ---
jl_genericmemory_t *jl_global_roots_list;
@@ -2858,17 +2936,17 @@ JL_DLLEXPORT jl_value_t *jl_as_global_root(jl_value_t *val, int insert)
return val;
}
-static void jl_prepare_serialization_data(jl_array_t *mod_array, jl_array_t *newly_inferred, uint64_t worklist_key,
+static void jl_prepare_serialization_data(jl_array_t *mod_array, jl_array_t *newly_inferred,
/* outputs */ jl_array_t **extext_methods JL_REQUIRE_ROOTED_SLOT,
jl_array_t **new_ext_cis JL_REQUIRE_ROOTED_SLOT,
- jl_array_t **method_roots_list JL_REQUIRE_ROOTED_SLOT,
- jl_array_t **edges JL_REQUIRE_ROOTED_SLOT)
+ jl_array_t **edges JL_REQUIRE_ROOTED_SLOT,
+ jl_query_cache *query_cache)
{
// extext_methods: [method1, ...], worklist-owned "extending external" methods added to functions owned by modules outside the worklist
// edges: [caller1, ext_targets, ...] for worklist-owned methods calling external methods
// Save the inferred code from newly inferred, external methods
- *new_ext_cis = queue_external_cis(newly_inferred);
+ *new_ext_cis = queue_external_cis(newly_inferred, query_cache);
// Collect method extensions and edges data
*extext_methods = jl_alloc_vec_any(0);
@@ -2885,15 +2963,12 @@ static void jl_prepare_serialization_data(jl_array_t *mod_array, jl_array_t *new
}
if (edges) {
+ // Extract `edges` now (from info prepared by jl_collect_methcache_from_mod)
size_t world = jl_atomic_load_acquire(&jl_world_counter);
- // Extract `new_ext_cis` and `edges` now (from info prepared by jl_collect_methcache_from_mod)
- *method_roots_list = jl_alloc_vec_any(0);
- // Collect the new method roots for external specializations
- jl_collect_new_roots(*method_roots_list, *new_ext_cis, worklist_key);
*edges = jl_alloc_vec_any(0);
jl_collect_internal_cis(*edges, world);
}
- internal_methods = NULL;
+ internal_methods = NULL; // global
JL_GC_POP();
}
@@ -2901,14 +2976,16 @@ static void jl_prepare_serialization_data(jl_array_t *mod_array, jl_array_t *new
// In addition to the system image (where `worklist = NULL`), this can also save incremental images with external linkage
static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
jl_array_t *worklist, jl_array_t *extext_methods,
- jl_array_t *new_ext_cis, jl_array_t *method_roots_list,
- jl_array_t *edges) JL_GC_DISABLED
+ jl_array_t *new_ext_cis, jl_array_t *edges,
+ jl_query_cache *query_cache)
{
htable_new(&field_replace, 0);
htable_new(&bits_replace, 0);
// strip metadata and IR when requested
- if (jl_options.strip_metadata || jl_options.strip_ir)
+ if (jl_options.strip_metadata || jl_options.strip_ir) {
jl_strip_all_codeinfos();
+ jl_strip_all_docmeta(mod_array);
+ }
// collect needed methods and replace method tables that are in the tags array
htable_new(&new_methtables, 0);
arraylist_t MIs;
@@ -2934,7 +3011,7 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
if (jl_field_isptr(st, field)) {
record_field_change((jl_value_t**)fldaddr, newval);
}
- else {
+ else if (jl_field_size(st, field) > 0) {
// replace the bits
ptrhash_put(&bits_replace, (void*)fldaddr, newval);
// and any pointers inside
@@ -3009,6 +3086,7 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
ios_mem(&gvar_record, 0);
ios_mem(&fptr_record, 0);
jl_serializer_state s = {0};
+ s.query_cache = query_cache;
s.incremental = !(worklist == NULL);
s.s = &sysimg;
s.const_data = &const_data;
@@ -3026,15 +3104,19 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
arraylist_new(&s.uniquing_objs, 0);
arraylist_new(&s.fixup_types, 0);
arraylist_new(&s.fixup_objs, 0);
- arraylist_new(&s.ccallable_list, 0);
s.buildid_depmods_idxs = image_to_depmodidx(mod_array);
s.link_ids_relocs = jl_alloc_array_1d(jl_array_int32_type, 0);
s.link_ids_gctags = jl_alloc_array_1d(jl_array_int32_type, 0);
s.link_ids_gvars = jl_alloc_array_1d(jl_array_int32_type, 0);
s.link_ids_external_fnvars = jl_alloc_array_1d(jl_array_int32_type, 0);
+ s.method_roots_list = NULL;
+ htable_new(&s.method_roots_index, 0);
+ if (worklist) {
+ s.method_roots_list = jl_alloc_vec_any(0);
+ s.worklist_key = jl_worklist_key(worklist);
+ }
jl_value_t **const*const tags = get_tags(); // worklist == NULL ? get_tags() : NULL;
-
if (worklist == NULL) {
// empty!(Core.ARGS)
if (jl_core_module != NULL) {
@@ -3049,12 +3131,6 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
gmp_limb_size = jl_unbox_long(jl_get_global((jl_module_t*)jl_get_global(jl_base_module, jl_symbol("GMP")),
jl_symbol("BITS_PER_LIMB"))) / 8;
}
- if (jl_base_module) {
- jl_value_t *docs = jl_get_global(jl_base_module, jl_symbol("Docs"));
- if (docs && jl_is_module(docs)) {
- jl_docmeta_sym = (jl_sym_t*)jl_get_global((jl_module_t*)docs, jl_symbol("META"));
- }
- }
jl_genericmemory_t *global_roots_list = NULL;
jl_genericmemory_t *global_roots_keyset = NULL;
@@ -3079,8 +3155,6 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
jl_queue_for_serialization(&s, extext_methods);
// Queue the new specializations
jl_queue_for_serialization(&s, new_ext_cis);
- // Queue the new roots
- jl_queue_for_serialization(&s, method_roots_list);
// Queue the edges
jl_queue_for_serialization(&s, edges);
}
@@ -3091,7 +3165,15 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
if (jl_options.trim)
record_gvars(&s, &MIs);
jl_serialize_reachable(&s);
- // step 1.3: prune (garbage collect) special weak references from the jl_global_roots_list
+ // Beyond this point, all content should already have been visited, so now we can prune
+ // the rest and add some internal root arrays.
+ // step 1.3: include some other special roots
+ if (s.incremental) {
+ // Queue the new roots array
+ jl_queue_for_serialization(&s, s.method_roots_list);
+ jl_serialize_reachable(&s);
+ }
+ // step 1.4: prune (garbage collect) special weak references from the jl_global_roots_list
if (worklist == NULL) {
global_roots_list = jl_alloc_memory_any(0);
global_roots_keyset = jl_alloc_memory_any(0);
@@ -3107,16 +3189,18 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
jl_queue_for_serialization(&s, global_roots_keyset);
jl_serialize_reachable(&s);
}
- // step 1.4: prune (garbage collect) some special weak references from
- // built-in type caches too
+ // step 1.5: prune (garbage collect) some special weak references from known caches
for (i = 0; i < serialization_queue.len; i++) {
jl_value_t *v = (jl_value_t*)serialization_queue.items[i];
if (jl_options.trim) {
- if (jl_is_method(v)){
+ if (jl_is_method(v)) {
jl_method_t *m = (jl_method_t*)v;
jl_value_t *specializations_ = jl_atomic_load_relaxed(&m->specializations);
- if (!jl_is_svec(specializations_))
+ if (!jl_is_svec(specializations_)) {
+ if (ptrhash_get(&serialization_order, specializations_) == HT_NOTFOUND)
+ record_field_change((jl_value_t **)&m->specializations, (jl_value_t*)jl_emptysvec);
continue;
+ }
jl_svec_t *specializations = (jl_svec_t *)specializations_;
size_t l = jl_svec_len(specializations), i;
@@ -3127,7 +3211,8 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
if (ptrhash_get(&serialization_order, mi) == HT_NOTFOUND)
jl_svecset(specializations, i, jl_nothing);
}
- } else if (jl_is_module(v)) {
+ }
+ else if (jl_is_module(v)) {
jl_prune_module_bindings((jl_module_t*)v);
}
}
@@ -3259,7 +3344,7 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
jl_write_value(&s, jl_module_init_order);
jl_write_value(&s, extext_methods);
jl_write_value(&s, new_ext_cis);
- jl_write_value(&s, method_roots_list);
+ jl_write_value(&s, s.method_roots_list);
jl_write_value(&s, edges);
}
write_uint32(f, jl_array_len(s.link_ids_gctags));
@@ -3271,7 +3356,6 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
write_uint32(f, jl_array_len(s.link_ids_external_fnvars));
ios_write(f, (char*)jl_array_data(s.link_ids_external_fnvars, uint32_t), jl_array_len(s.link_ids_external_fnvars) * sizeof(uint32_t));
write_uint32(f, external_fns_begin);
- jl_write_arraylist(s.s, &s.ccallable_list);
}
assert(object_worklist.len == 0);
@@ -3283,13 +3367,13 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
arraylist_free(&s.uniquing_objs);
arraylist_free(&s.fixup_types);
arraylist_free(&s.fixup_objs);
- arraylist_free(&s.ccallable_list);
arraylist_free(&s.memowner_list);
arraylist_free(&s.memref_list);
arraylist_free(&s.relocs_list);
arraylist_free(&s.gctags_list);
arraylist_free(&gvars);
arraylist_free(&external_fns);
+ htable_free(&s.method_roots_index);
htable_free(&field_replace);
htable_free(&bits_replace);
htable_free(&serialization_order);
@@ -3350,18 +3434,20 @@ JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *workli
}
jl_array_t *mod_array = NULL, *extext_methods = NULL, *new_ext_cis = NULL;
- jl_array_t *method_roots_list = NULL, *edges = NULL;
+ jl_array_t *edges = NULL;
int64_t checksumpos = 0;
int64_t checksumpos_ff = 0;
int64_t datastartpos = 0;
- JL_GC_PUSH5(&mod_array, &extext_methods, &new_ext_cis, &method_roots_list, &edges);
+ JL_GC_PUSH4(&mod_array, &extext_methods, &new_ext_cis, &edges);
+ jl_query_cache query_cache;
+ init_query_cache(&query_cache);
+
+ mod_array = jl_get_loaded_modules(); // __toplevel__ modules loaded in this session (from Base.loaded_modules_array)
if (worklist) {
- mod_array = jl_get_loaded_modules(); // __toplevel__ modules loaded in this session (from Base.loaded_modules_array)
// Generate _native_data`
if (_native_data != NULL) {
- jl_prepare_serialization_data(mod_array, newly_inferred, jl_worklist_key(worklist),
- &extext_methods, &new_ext_cis, NULL, NULL);
+ jl_prepare_serialization_data(mod_array, newly_inferred, &extext_methods, &new_ext_cis, NULL, &query_cache);
jl_precompile_toplevel_module = (jl_module_t*)jl_array_ptr_ref(worklist, jl_array_len(worklist)-1);
*_native_data = jl_precompile_worklist(worklist, extext_methods, new_ext_cis);
jl_precompile_toplevel_module = NULL;
@@ -3392,8 +3478,7 @@ JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *workli
assert((ct->reentrant_timing & 0b1110) == 0);
ct->reentrant_timing |= 0b1000;
if (worklist) {
- jl_prepare_serialization_data(mod_array, newly_inferred, jl_worklist_key(worklist),
- &extext_methods, &new_ext_cis, &method_roots_list, &edges);
+ jl_prepare_serialization_data(mod_array, newly_inferred, &extext_methods, &new_ext_cis, &edges, &query_cache);
if (!emit_split) {
write_int32(f, 0); // No clone_targets
write_padding(f, LLT_ALIGN(ios_pos(f), JL_CACHE_BYTE_ALIGNMENT) - ios_pos(f));
@@ -3405,7 +3490,7 @@ JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *workli
}
if (_native_data != NULL)
native_functions = *_native_data;
- jl_save_system_image_to_stream(ff, mod_array, worklist, extext_methods, new_ext_cis, method_roots_list, edges);
+ jl_save_system_image_to_stream(ff, mod_array, worklist, extext_methods, new_ext_cis, edges, &query_cache);
if (_native_data != NULL)
native_functions = NULL;
// make sure we don't run any Julia code concurrently before this point
@@ -3434,6 +3519,8 @@ JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *workli
}
}
+ destroy_query_cache(&query_cache);
+
JL_GC_POP();
*s = f;
if (emit_split)
@@ -3443,33 +3530,107 @@ JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *workli
JL_DLLEXPORT size_t ios_write_direct(ios_t *dest, ios_t *src);
-// Takes in a path of the form "usr/lib/julia/sys.so" (jl_restore_system_image should be passed the same string)
-JL_DLLEXPORT void jl_preload_sysimg_so(const char *fname)
+// Takes in a path of the form "usr/lib/julia/sys.so"
+JL_DLLEXPORT jl_image_buf_t jl_preload_sysimg(const char *fname)
{
- if (jl_sysimg_handle)
- return; // embedded target already called jl_set_sysimg_so
+ if (jl_sysimage_buf.kind != JL_IMAGE_KIND_NONE)
+ return jl_sysimage_buf;
char *dot = (char*) strrchr(fname, '.');
int is_ji = (dot && !strcmp(dot, ".ji"));
- // Get handle to sys.so
- if (!is_ji) // .ji extension => load .ji file only
- jl_set_sysimg_so(jl_load_dynamic_library(fname, JL_RTLD_LOCAL | JL_RTLD_NOW, 1));
+ if (is_ji) {
+ // .ji extension => load .ji file only
+ ios_t f;
+
+ if (ios_file(&f, fname, 1, 0, 0, 0) == NULL)
+ jl_errorf("System image file \"%s\" not found.", fname);
+ ios_bufmode(&f, bm_none);
+
+ ios_seek_end(&f);
+ size_t len = ios_pos(&f);
+ char *sysimg = (char*)jl_gc_perm_alloc(len, 0, 64, 0);
+ ios_seek(&f, 0);
+
+ if (ios_readall(&f, sysimg, len) != len)
+ jl_errorf("Error reading system image file.");
+
+ ios_close(&f);
+
+ jl_sysimage_buf = (jl_image_buf_t) {
+ .kind = JL_IMAGE_KIND_JI,
+ .handle = NULL,
+ .pointers = NULL,
+ .data = sysimg,
+ .size = len,
+ .base = 0,
+ };
+ return jl_sysimage_buf;
+ } else {
+ // Get handle to sys.so
+ return jl_set_sysimg_so(jl_load_dynamic_library(fname, JL_RTLD_LOCAL | JL_RTLD_NOW, 1));
+ }
}
-// Allow passing in a module handle directly, rather than a path
-JL_DLLEXPORT void jl_set_sysimg_so(void *handle)
+// From a shared library handle, verify consistency and return a jl_image_buf_t
+static jl_image_buf_t get_image_buf(void *handle, int is_pkgimage)
{
+ size_t *plen;
+ const char *data;
+ const void *pointers;
+ uint64_t base;
+
+ // verify that the linker resolved the symbols in this image against ourselves (libjulia-internal)
void** (*get_jl_RTLD_DEFAULT_handle_addr)(void) = NULL;
if (handle != jl_RTLD_DEFAULT_handle) {
int symbol_found = jl_dlsym(handle, "get_jl_RTLD_DEFAULT_handle_addr", (void **)&get_jl_RTLD_DEFAULT_handle_addr, 0);
if (!symbol_found || (void*)&jl_RTLD_DEFAULT_handle != (get_jl_RTLD_DEFAULT_handle_addr()))
- jl_error("System image file failed consistency check: maybe opened the wrong version?");
+ jl_error("Image file failed consistency check: maybe opened the wrong version?");
}
- if (jl_options.cpu_target == NULL)
- jl_options.cpu_target = "native";
- jl_sysimg_handle = handle;
- sysimage = jl_init_processor_sysimg(handle);
+
+ // verification passed, look up the buffer pointers
+ if (jl_system_image_size == 0 || is_pkgimage) {
+ // in the usual case, the sysimage was not statically linked to libjulia-internal
+ // look up the external sysimage symbols via the dynamic linker
+ jl_dlsym(handle, "jl_system_image_size", (void **)&plen, 1);
+ jl_dlsym(handle, "jl_system_image_data", (void **)&data, 1);
+ jl_dlsym(handle, "jl_image_pointers", (void**)&pointers, 1);
+ } else {
+ // the sysimage was statically linked directly against libjulia-internal
+ // use the internal symbols
+ plen = &jl_system_image_size;
+ pointers = &jl_image_pointers;
+ data = &jl_system_image_data;
+ }
+
+#ifdef _OS_WINDOWS_
+ base = (intptr_t)handle;
+#else
+ Dl_info dlinfo;
+ if (dladdr((void*)pointers, &dlinfo) != 0)
+ base = (intptr_t)dlinfo.dli_fbase;
+ else
+ base = 0;
+#endif
+
+ return (jl_image_buf_t) {
+ .kind = JL_IMAGE_KIND_SO,
+ .handle = handle,
+ .pointers = pointers,
+ .data = data,
+ .size = *plen,
+ .base = base,
+ };
+}
+
+// Allow passing in a module handle directly, rather than a path
+JL_DLLEXPORT jl_image_buf_t jl_set_sysimg_so(void *handle)
+{
+ if (jl_sysimage_buf.kind != JL_IMAGE_KIND_NONE)
+ return jl_sysimage_buf;
+
+ jl_sysimage_buf = get_image_buf(handle, /* is_pkgimage */ 0);
+ return jl_sysimage_buf;
}
#ifndef JL_NDEBUG
@@ -3489,12 +3650,100 @@ extern void export_jl_small_typeof(void);
// into the native code of the image. See https://github.com/JuliaLang/julia/pull/52123#issuecomment-1959965395.
int IMAGE_NATIVE_CODE_TAINTED = 0;
-static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl_array_t *depmods, uint64_t checksum,
+// TODO: This should possibly be in Julia
+static int jl_validate_binding_partition(jl_binding_t *b, jl_binding_partition_t *bpart, size_t mod_idx, int unchanged_implicit, int no_replacement)
+{
+ if (jl_atomic_load_relaxed(&bpart->max_world) != ~(size_t)0)
+ return 1;
+ size_t raw_kind = bpart->kind;
+ enum jl_partition_kind kind = (enum jl_partition_kind)(raw_kind & PARTITION_MASK_KIND);
+ if (!unchanged_implicit && jl_bkind_is_some_implicit(kind)) {
+ // TODO: Should we actually update this in place or delete it from the partitions list
+ // and allocate a fresh bpart?
+ jl_update_loaded_bpart(b, bpart);
+ bpart->kind |= (raw_kind & PARTITION_MASK_FLAG);
+ if (jl_atomic_load_relaxed(&bpart->min_world) > jl_require_world)
+ goto invalidated;
+ }
+ if (!jl_bkind_is_some_explicit_import(kind) && kind != PARTITION_KIND_IMPLICIT_GLOBAL)
+ return 1;
+ jl_binding_t *imported_binding = (jl_binding_t*)bpart->restriction;
+ if (no_replacement)
+ goto add_backedge;
+ jl_binding_partition_t *latest_imported_bpart = jl_atomic_load_relaxed(&imported_binding->partitions);
+ if (!latest_imported_bpart)
+ return 1;
+ if (jl_atomic_load_relaxed(&latest_imported_bpart->min_world) <=
+ jl_atomic_load_relaxed(&bpart->min_world)) {
+add_backedge:
+ // Imported binding is still valid
+ if ((kind == PARTITION_KIND_EXPLICIT || kind == PARTITION_KIND_IMPORTED) &&
+ external_blob_index((jl_value_t*)imported_binding) != mod_idx) {
+ jl_add_binding_backedge(imported_binding, (jl_value_t*)b);
+ }
+ return 1;
+ }
+ else {
+ // Binding partition was invalidated
+ assert(jl_atomic_load_relaxed(&bpart->min_world) == jl_require_world);
+ jl_atomic_store_relaxed(&bpart->min_world,
+ jl_atomic_load_relaxed(&latest_imported_bpart->min_world));
+ }
+invalidated:
+ // We need to go through and re-validate any bindings in the same image that
+ // may have imported us.
+ if (b->backedges) {
+ for (size_t i = 0; i < jl_array_len(b->backedges); i++) {
+ jl_value_t *edge = jl_array_ptr_ref(b->backedges, i);
+ if (!jl_is_binding(edge))
+ continue;
+ jl_binding_t *bedge = (jl_binding_t*)edge;
+ if (!jl_atomic_load_relaxed(&bedge->partitions))
+ continue;
+ jl_validate_binding_partition(bedge, jl_atomic_load_relaxed(&bedge->partitions), mod_idx, 0, 0);
+ }
+ }
+ if (bpart->kind & PARTITION_FLAG_EXPORTED) {
+ jl_module_t *mod = b->globalref->mod;
+ jl_sym_t *name = b->globalref->name;
+ JL_LOCK(&mod->lock);
+ jl_atomic_store_release(&mod->export_set_changed_since_require_world, 1);
+ if (mod->usings_backedges != jl_nothing) {
+ for (size_t i = 0; i < jl_array_len(mod->usings_backedges); i++) {
+ jl_module_t *edge = (jl_module_t*)jl_array_ptr_ref(mod->usings_backedges, i);
+ jl_binding_t *importee = jl_get_module_binding(edge, name, 0);
+ if (!importee)
+ continue;
+ if (!jl_atomic_load_relaxed(&importee->partitions))
+ continue;
+ JL_UNLOCK(&mod->lock);
+ jl_validate_binding_partition(importee, jl_atomic_load_relaxed(&importee->partitions), mod_idx, 0, 0);
+ JL_LOCK(&mod->lock);
+ }
+ }
+ JL_UNLOCK(&mod->lock);
+ return 0;
+ }
+ return 1;
+}
+
+static int all_usings_unchanged_implicit(jl_module_t *mod)
+{
+ int unchanged_implicit = 1;
+ for (size_t i = 0; unchanged_implicit && i < module_usings_length(mod); i++) {
+ jl_module_t *usee = module_usings_getmod(mod, i);
+ unchanged_implicit &= !jl_atomic_load_acquire(&usee->export_set_changed_since_require_world);
+ }
+ return unchanged_implicit;
+}
+
+static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image,
+ jl_array_t *depmods, uint64_t checksum,
/* outputs */ jl_array_t **restored, jl_array_t **init_order,
jl_array_t **extext_methods, jl_array_t **internal_methods,
jl_array_t **new_ext_cis, jl_array_t **method_roots_list,
jl_array_t **edges,
- char **base, arraylist_t *ccallable_list, pkgcachesizes *cachesizes) JL_GC_DISABLED
+ pkgcachesizes *cachesizes) JL_GC_DISABLED
{
jl_task_t *ct = jl_current_task;
int en = jl_gc_enable(0);
@@ -3621,7 +3870,6 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl
ios_read(f, (char*)jl_array_data(s.link_ids_external_fnvars, uint32_t), nlinks_external_fnvars * sizeof(uint32_t));
}
uint32_t external_fns_begin = read_uint32(f);
- jl_read_arraylist(s.s, ccallable_list ? ccallable_list : &s.ccallable_list);
if (s.incremental) {
assert(restored && init_order && extext_methods && internal_methods && new_ext_cis && method_roots_list && edges);
*restored = (jl_array_t*)jl_delayed_reloc(&s, offset_restored);
@@ -3641,8 +3889,6 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl
char *image_base = (char*)&sysimg.buf[0];
reloc_t *relocs_base = (reloc_t*)&relocs.buf[0];
- if (base)
- *base = image_base;
s.s = &sysimg;
jl_read_reloclist(&s, s.link_ids_gctags, GC_OLD | GC_IN_IMAGE); // gctags
@@ -3923,27 +4169,43 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl
memcpy(newitems, mod->usings.items, mod->usings.len * sizeof(void*));
mod->usings.items = newitems;
}
- // Move the binding bits back to their correct place
-#ifdef _P64
- jl_svec_t *table = jl_atomic_load_relaxed(&mod->bindings);
- for (size_t i = 0; i < jl_svec_len(table); i++) {
- jl_binding_t *b = (jl_binding_t*)jl_svecref(table, i);
- if ((jl_value_t*)b == jl_nothing)
- continue;
- jl_binding_partition_t *bpart = jl_atomic_load_relaxed(&b->partitions);
- while (bpart) {
- jl_atomic_store_relaxed(&bpart->restriction,
- encode_restriction((jl_value_t*)jl_atomic_load_relaxed(&bpart->restriction), bpart->reserved));
- bpart->reserved = 0;
- bpart = jl_atomic_load_relaxed(&bpart->next);
+ size_t mod_idx = external_blob_index((jl_value_t*)mod);
+ if (s.incremental) {
+ // Rebuild cross-image usings backedges
+ for (size_t i = 0; i < module_usings_length(mod); ++i) {
+ struct _jl_module_using *data = module_usings_getidx(mod, i);
+ if (external_blob_index((jl_value_t*)data->mod) != mod_idx) {
+ jl_add_usings_backedge(data->mod, mod);
+ }
}
}
-#endif
}
else {
abort();
}
}
+ if (s.incremental) {
+ int no_replacement = jl_atomic_load_relaxed(&jl_first_image_replacement_world) == ~(size_t)0;
+ for (size_t i = 0; i < s.fixup_objs.len; i++) {
+ uintptr_t item = (uintptr_t)s.fixup_objs.items[i];
+ jl_value_t *obj = (jl_value_t*)(image_base + item);
+ if (jl_is_module(obj)) {
+ jl_module_t *mod = (jl_module_t*)obj;
+ size_t mod_idx = external_blob_index((jl_value_t*)mod);
+ jl_svec_t *table = jl_atomic_load_relaxed(&mod->bindings);
+ int unchanged_implicit = no_replacement || all_usings_unchanged_implicit(mod);
+ for (size_t i = 0; i < jl_svec_len(table); i++) {
+ jl_binding_t *b = (jl_binding_t*)jl_svecref(table, i);
+ if ((jl_value_t*)b == jl_nothing)
+ continue;
+ jl_binding_partition_t *bpart = jl_atomic_load_relaxed(&b->partitions);
+ if (!jl_validate_binding_partition(b, bpart, mod_idx, unchanged_implicit, no_replacement)) {
+ unchanged_implicit = all_usings_unchanged_implicit(mod);
+ }
+ }
+ }
+ }
+ }
arraylist_free(&s.fixup_types);
arraylist_free(&s.fixup_objs);
@@ -3986,11 +4248,6 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl
s.s = &sysimg;
jl_update_all_fptrs(&s, image); // fptr relocs and registration
- if (!ccallable_list) {
- // TODO: jl_sysimg_handle or img_handle?
- jl_reinit_ccallable(&s.ccallable_list, image_base, jl_sysimg_handle);
- arraylist_free(&s.ccallable_list);
- }
s.s = NULL;
ios_close(&fptr_record);
@@ -4068,8 +4325,6 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i
assert(datastartpos > 0 && datastartpos < dataendpos);
needs_permalloc = jl_options.permalloc_pkgimg || needs_permalloc;
- char *base;
- arraylist_t ccallable_list;
jl_value_t *restored = NULL;
jl_array_t *init_order = NULL, *extext_methods = NULL, *internal_methods = NULL, *new_ext_cis = NULL, *method_roots_list = NULL, *edges = NULL;
@@ -4099,12 +4354,15 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i
ios_static_buffer(f, sysimg, len);
pkgcachesizes cachesizes;
jl_restore_system_image_from_stream_(f, image, depmods, checksum, (jl_array_t**)&restored, &init_order, &extext_methods, &internal_methods, &new_ext_cis, &method_roots_list,
- &edges, &base, &ccallable_list, &cachesizes);
+ &edges, &cachesizes);
JL_SIGATOMIC_END();
- // No special processing of `new_ext_cis` is required because recaching handled it
// Add roots to methods
- jl_copy_roots(method_roots_list, jl_worklist_key((jl_array_t*)restored));
+ int failed = jl_copy_roots(method_roots_list, jl_worklist_key((jl_array_t*)restored));
+ if (failed != 0) {
+ jl_printf(JL_STDERR, "Error copying roots to methods from Module: %s\n", pkgname);
+ abort();
+ }
// Insert method extensions and handle edges
int new_methods = jl_array_nrows(extext_methods) > 0;
if (!new_methods) {
@@ -4130,10 +4388,6 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i
// now permit more methods to be added again
JL_UNLOCK(&world_counter_lock);
- // reinit ccallables
- jl_reinit_ccallable(&ccallable_list, base, pkgimage_handle);
- arraylist_free(&ccallable_list);
-
jl_value_t *ext_edges = new_ext_cis ? (jl_value_t*)new_ext_cis : jl_nothing;
if (completeinfo) {
@@ -4158,16 +4412,16 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i
return restored;
}
-static void jl_restore_system_image_from_stream(ios_t *f, jl_image_t *image, uint32_t checksum)
+static void jl_restore_system_image_from_stream(ios_t *f, jl_image_t *image, void *image_handle, uint32_t checksum)
{
JL_TIMING(LOAD_IMAGE, LOAD_Sysimg);
- jl_restore_system_image_from_stream_(f, image, NULL, checksum | ((uint64_t)0xfdfcfbfa << 32), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ jl_restore_system_image_from_stream_(f, image, NULL, checksum | ((uint64_t)0xfdfcfbfa << 32), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
-JL_DLLEXPORT jl_value_t *jl_restore_incremental_from_buf(void* pkgimage_handle, const char *buf, jl_image_t *image, size_t sz, jl_array_t *depmods, int completeinfo, const char *pkgname, int needs_permalloc)
+JL_DLLEXPORT jl_value_t *jl_restore_incremental_from_buf(void* pkgimage_handle, jl_image_buf_t buf, jl_image_t *image, jl_array_t *depmods, int completeinfo, const char *pkgname, int needs_permalloc)
{
ios_t f;
- ios_static_buffer(&f, (char*)buf, sz);
+ ios_static_buffer(&f, (char*)buf.data, buf.size);
jl_value_t *ret = jl_restore_package_image_from_stream(pkgimage_handle, &f, image, depmods, completeinfo, pkgname, needs_permalloc);
ios_close(&f);
return ret;
@@ -4186,47 +4440,22 @@ JL_DLLEXPORT jl_value_t *jl_restore_incremental(const char *fname, jl_array_t *d
return ret;
}
-// TODO: need to enforce that the alignment of the buffer is suitable for vectors
-JL_DLLEXPORT void jl_restore_system_image(const char *fname)
+JL_DLLEXPORT void jl_restore_system_image(jl_image_t *image, jl_image_buf_t buf)
{
-#ifndef JL_NDEBUG
- char *dot = fname ? (char*)strrchr(fname, '.') : NULL;
- int is_ji = (dot && !strcmp(dot, ".ji"));
- assert((is_ji || jl_sysimg_handle) && "System image file not preloaded");
-#endif
+ ios_t f;
- if (jl_sysimg_handle) {
- // load the pre-compiled sysimage from jl_sysimg_handle
- jl_load_sysimg_so();
- }
- else {
- ios_t f;
- if (ios_file(&f, fname, 1, 0, 0, 0) == NULL)
- jl_errorf("System image file \"%s\" not found.", fname);
- ios_bufmode(&f, bm_none);
- JL_SIGATOMIC_BEGIN();
- ios_seek_end(&f);
- size_t len = ios_pos(&f);
- char *sysimg = (char*)jl_gc_perm_alloc(len, 0, 64, 0);
- ios_seek(&f, 0);
- if (ios_readall(&f, sysimg, len) != len)
- jl_errorf("Error reading system image file.");
- ios_close(&f);
- uint32_t checksum = jl_crc32c(0, sysimg, len);
- ios_static_buffer(&f, sysimg, len);
- jl_restore_system_image_from_stream(&f, &sysimage, checksum);
- ios_close(&f);
- JL_SIGATOMIC_END();
- }
-}
+ if (buf.kind == JL_IMAGE_KIND_NONE)
+ return;
+
+ if (buf.kind == JL_IMAGE_KIND_SO)
+ assert(image->fptrs.ptrs); // jl_init_processor_sysimg should already be run
-JL_DLLEXPORT void jl_restore_system_image_data(const char *buf, size_t len)
-{
- ios_t f;
JL_SIGATOMIC_BEGIN();
- ios_static_buffer(&f, (char*)buf, len);
- uint32_t checksum = jl_crc32c(0, buf, len);
- jl_restore_system_image_from_stream(&f, &sysimage, checksum);
+ ios_static_buffer(&f, (char *)buf.data, buf.size);
+
+ uint32_t checksum = jl_crc32c(0, buf.data, buf.size);
+ jl_restore_system_image_from_stream(&f, image, buf.handle, checksum);
+
ios_close(&f);
JL_SIGATOMIC_END();
}
@@ -4245,13 +4474,13 @@ JL_DLLEXPORT jl_value_t *jl_restore_package_image_from_file(const char *fname, j
#endif
jl_errorf("Error opening package file %s: %s\n", fname, reason);
}
- const char *pkgimg_data;
- jl_dlsym(pkgimg_handle, "jl_system_image_data", (void **)&pkgimg_data, 1);
- size_t *plen;
- jl_dlsym(pkgimg_handle, "jl_system_image_size", (void **)&plen, 1);
- jl_gc_notify_image_load(pkgimg_data, *plen);
- jl_image_t pkgimage = jl_init_processor_pkgimg(pkgimg_handle);
+ jl_image_buf_t buf = get_image_buf(pkgimg_handle, /* is_pkgimage */ 1);
+
+ jl_gc_notify_image_load(buf.data, buf.size);
+
+ // Despite the name, this function actually parses the pkgimage
+ jl_image_t pkgimage = jl_init_processor_pkgimg(buf);
if (ignore_native) {
// Must disable using native code in possible downstream users of this code:
@@ -4260,11 +4489,55 @@ JL_DLLEXPORT jl_value_t *jl_restore_package_image_from_file(const char *fname, j
IMAGE_NATIVE_CODE_TAINTED = 1;
}
- jl_value_t* mod = jl_restore_incremental_from_buf(pkgimg_handle, pkgimg_data, &pkgimage, *plen, depmods, completeinfo, pkgname, 0);
+ jl_value_t* mod = jl_restore_incremental_from_buf(pkgimg_handle, buf, &pkgimage, depmods, completeinfo, pkgname, 0);
return mod;
}
+JL_DLLEXPORT void _jl_promote_ci_to_current(jl_code_instance_t *ci, size_t validated_world) JL_NOTSAFEPOINT
+{
+ if (jl_atomic_load_relaxed(&ci->max_world) != validated_world)
+ return;
+ jl_atomic_store_relaxed(&ci->max_world, ~(size_t)0);
+ jl_svec_t *edges = jl_atomic_load_relaxed(&ci->edges);
+ for (size_t i = 0; i < jl_svec_len(edges); i++) {
+ jl_value_t *edge = jl_svecref(edges, i);
+ if (!jl_is_code_instance(edge))
+ continue;
+ _jl_promote_ci_to_current((jl_code_instance_t *)edge, validated_world);
+ }
+}
+
+JL_DLLEXPORT void jl_promote_ci_to_current(jl_code_instance_t *ci, size_t validated_world)
+{
+ size_t current_world = jl_atomic_load_relaxed(&jl_world_counter);
+ // No need to acquire the lock if we've been invalidated anyway
+ if (current_world > validated_world)
+ return;
+ JL_LOCK(&world_counter_lock);
+ current_world = jl_atomic_load_relaxed(&jl_world_counter);
+ if (current_world == validated_world) {
+ _jl_promote_ci_to_current(ci, validated_world);
+ }
+ JL_UNLOCK(&world_counter_lock);
+}
+
+JL_DLLEXPORT void jl_promote_cis_to_current(jl_code_instance_t **cis, size_t n, size_t validated_world)
+{
+ size_t current_world = jl_atomic_load_relaxed(&jl_world_counter);
+ // No need to acquire the lock if we've been invalidated anyway
+ if (current_world > validated_world)
+ return;
+ JL_LOCK(&world_counter_lock);
+ current_world = jl_atomic_load_relaxed(&jl_world_counter);
+ if (current_world == validated_world) {
+ for (size_t i = 0; i < n; i++) {
+ _jl_promote_ci_to_current(cis[i], validated_world);
+ }
+ }
+ JL_UNLOCK(&world_counter_lock);
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c
index 1985357321a3a..65f7dc59d9397 100644
--- a/src/staticdata_utils.c
+++ b/src/staticdata_utils.c
@@ -86,6 +86,23 @@ static jl_array_t *newly_inferred JL_GLOBALLY_ROOTED /*FIXME*/;
// Mutex for newly_inferred
jl_mutex_t newly_inferred_mutex;
extern jl_mutex_t world_counter_lock;
+static _Atomic(uint8_t) jl_tag_newly_inferred_enabled = 0;
+
+/**
+ * @brief Enable tagging of all newly inferred CodeInstances.
+ */
+JL_DLLEXPORT void jl_tag_newly_inferred_enable(void)
+{
+ jl_atomic_fetch_add(&jl_tag_newly_inferred_enabled, 1); // FIXME overflow?
+}
+/**
+ * @brief Disable tagging of all newly inferred CodeInstances.
+ */
+JL_DLLEXPORT void jl_tag_newly_inferred_disable(void)
+{
+ jl_atomic_fetch_add(&jl_tag_newly_inferred_enabled, -1); // FIXME underflow?
+}
+
// Register array of newly-inferred MethodInstances
// This gets called as the first step of Base.include_package_for_output
@@ -101,6 +118,12 @@ JL_DLLEXPORT void jl_push_newly_inferred(jl_value_t* ci)
{
if (!newly_inferred)
return;
+ uint8_t tag_newly_inferred = jl_atomic_load_relaxed(&jl_tag_newly_inferred_enabled);
+ if (tag_newly_inferred) {
+ jl_method_instance_t *mi = jl_get_ci_mi((jl_code_instance_t*)ci);
+ uint8_t miflags = jl_atomic_load_relaxed(&mi->flags);
+ jl_atomic_store_relaxed(&mi->flags, miflags | JL_MI_FLAGS_MASK_PRECOMPILED);
+ }
JL_LOCK(&newly_inferred_mutex);
size_t end = jl_array_nrows(newly_inferred);
jl_array_grow_end(newly_inferred, 1);
@@ -108,63 +131,81 @@ JL_DLLEXPORT void jl_push_newly_inferred(jl_value_t* ci)
JL_UNLOCK(&newly_inferred_mutex);
}
-
// compute whether a type references something internal to worklist
// and thus could not have existed before deserialize
// and thus does not need delayed unique-ing
-static int type_in_worklist(jl_value_t *v) JL_NOTSAFEPOINT
+static int type_in_worklist(jl_value_t *v, jl_query_cache *cache) JL_NOTSAFEPOINT
{
if (jl_object_in_image(v))
return 0; // fast-path for rejection
+
+ void *cached = HT_NOTFOUND;
+ if (cache != NULL)
+ cached = ptrhash_get(&cache->type_in_worklist, v);
+
+ // fast-path for memoized results
+ if (cached != HT_NOTFOUND)
+ return cached == v;
+
+ int result = 0;
if (jl_is_uniontype(v)) {
jl_uniontype_t *u = (jl_uniontype_t*)v;
- return type_in_worklist(u->a) ||
- type_in_worklist(u->b);
+ result = type_in_worklist(u->a, cache) ||
+ type_in_worklist(u->b, cache);
}
else if (jl_is_unionall(v)) {
jl_unionall_t *ua = (jl_unionall_t*)v;
- return type_in_worklist((jl_value_t*)ua->var) ||
- type_in_worklist(ua->body);
+ result = type_in_worklist((jl_value_t*)ua->var, cache) ||
+ type_in_worklist(ua->body, cache);
}
else if (jl_is_typevar(v)) {
jl_tvar_t *tv = (jl_tvar_t*)v;
- return type_in_worklist(tv->lb) ||
- type_in_worklist(tv->ub);
+ result = type_in_worklist(tv->lb, cache) ||
+ type_in_worklist(tv->ub, cache);
}
else if (jl_is_vararg(v)) {
jl_vararg_t *tv = (jl_vararg_t*)v;
- if (tv->T && type_in_worklist(tv->T))
- return 1;
- if (tv->N && type_in_worklist(tv->N))
- return 1;
+ result = ((tv->T && type_in_worklist(tv->T, cache)) ||
+ (tv->N && type_in_worklist(tv->N, cache)));
}
else if (jl_is_datatype(v)) {
jl_datatype_t *dt = (jl_datatype_t*)v;
- if (!jl_object_in_image((jl_value_t*)dt->name))
- return 1;
- jl_svec_t *tt = dt->parameters;
- size_t i, l = jl_svec_len(tt);
- for (i = 0; i < l; i++)
- if (type_in_worklist(jl_tparam(dt, i)))
- return 1;
+ if (!jl_object_in_image((jl_value_t*)dt->name)) {
+ result = 1;
+ }
+ else {
+ jl_svec_t *tt = dt->parameters;
+ size_t i, l = jl_svec_len(tt);
+ for (i = 0; i < l; i++) {
+ if (type_in_worklist(jl_tparam(dt, i), cache)) {
+ result = 1;
+ break;
+ }
+ }
+ }
}
else {
- return type_in_worklist(jl_typeof(v));
+ return type_in_worklist(jl_typeof(v), cache);
}
- return 0;
+
+ // Memoize result
+ if (cache != NULL)
+ ptrhash_put(&cache->type_in_worklist, (void*)v, result ? (void*)v : NULL);
+
+ return result;
}
// When we infer external method instances, ensure they link back to the
// package. Otherwise they might be, e.g., for external macros.
// Implements Tarjan's SCC (strongly connected components) algorithm, simplified to remove the count variable
-static int has_backedge_to_worklist(jl_method_instance_t *mi, htable_t *visited, arraylist_t *stack)
+static int has_backedge_to_worklist(jl_method_instance_t *mi, htable_t *visited, arraylist_t *stack, jl_query_cache *query_cache)
{
jl_module_t *mod = mi->def.module;
if (jl_is_method(mod))
mod = ((jl_method_t*)mod)->module;
assert(jl_is_module(mod));
uint8_t is_precompiled = jl_atomic_load_relaxed(&mi->flags) & JL_MI_FLAGS_MASK_PRECOMPILED;
- if (is_precompiled || !jl_object_in_image((jl_value_t*)mod) || type_in_worklist(mi->specTypes)) {
+ if (is_precompiled || !jl_object_in_image((jl_value_t*)mod) || type_in_worklist(mi->specTypes, query_cache)) {
return 1;
}
if (!mi->backedges) {
@@ -182,13 +223,16 @@ static int has_backedge_to_worklist(jl_method_instance_t *mi, htable_t *visited,
arraylist_push(stack, (void*)mi);
int depth = stack->len;
*bp = (void*)((char*)HT_NOTFOUND + 4 + depth); // preliminarily mark as in-progress
- size_t i = 0, n = jl_array_nrows(mi->backedges);
+ jl_array_t *backedges = jl_mi_get_backedges(mi);
+ size_t i = 0, n = jl_array_nrows(backedges);
int cycle = depth;
while (i < n) {
jl_code_instance_t *be;
- i = get_next_edge(mi->backedges, i, NULL, &be);
+ i = get_next_edge(backedges, i, NULL, &be);
+ if (!be)
+ continue;
JL_GC_PROMISE_ROOTED(be); // get_next_edge propagates the edge for us here
- int child_found = has_backedge_to_worklist(jl_get_ci_mi(be), visited, stack);
+ int child_found = has_backedge_to_worklist(jl_get_ci_mi(be), visited, stack, query_cache);
if (child_found == 1 || child_found == 2) {
// found what we were looking for, so terminate early
found = 1;
@@ -220,7 +264,7 @@ static int has_backedge_to_worklist(jl_method_instance_t *mi, htable_t *visited,
// from the worklist or explicitly added by a `precompile` statement, and
// (4) are the most recently computed result for that method.
// These will be preserved in the image.
-static jl_array_t *queue_external_cis(jl_array_t *list)
+static jl_array_t *queue_external_cis(jl_array_t *list, jl_query_cache *query_cache)
{
if (list == NULL)
return NULL;
@@ -239,7 +283,7 @@ static jl_array_t *queue_external_cis(jl_array_t *list)
jl_method_instance_t *mi = jl_get_ci_mi(ci);
jl_method_t *m = mi->def.method;
if (ci->owner == jl_nothing && jl_atomic_load_relaxed(&ci->inferred) && jl_is_method(m) && jl_object_in_image((jl_value_t*)m->module)) {
- int found = has_backedge_to_worklist(mi, &visited, &stack);
+ int found = has_backedge_to_worklist(mi, &visited, &stack, query_cache);
assert(found == 0 || found == 1 || found == 2);
assert(stack.len == 0);
if (found == 1 && jl_atomic_load_relaxed(&ci->max_world) == ~(size_t)0) {
@@ -261,51 +305,6 @@ static jl_array_t *queue_external_cis(jl_array_t *list)
return new_ext_cis;
}
-// New roots for external methods
-static void jl_collect_new_roots(jl_array_t *roots, jl_array_t *new_ext_cis, uint64_t key)
-{
- htable_t mset;
- htable_new(&mset, 0);
- size_t l = new_ext_cis ? jl_array_nrows(new_ext_cis) : 0;
- for (size_t i = 0; i < l; i++) {
- jl_code_instance_t *ci = (jl_code_instance_t*)jl_array_ptr_ref(new_ext_cis, i);
- assert(jl_is_code_instance(ci));
- jl_method_t *m = jl_get_ci_mi(ci)->def.method;
- assert(jl_is_method(m));
- ptrhash_put(&mset, (void*)m, (void*)m);
- }
- int nwithkey;
- void *const *table = mset.table;
- jl_array_t *newroots = NULL;
- JL_GC_PUSH1(&newroots);
- for (size_t i = 0; i < mset.size; i += 2) {
- if (table[i+1] != HT_NOTFOUND) {
- jl_method_t *m = (jl_method_t*)table[i];
- assert(jl_is_method(m));
- nwithkey = nroots_with_key(m, key);
- if (nwithkey) {
- jl_array_ptr_1d_push(roots, (jl_value_t*)m);
- newroots = jl_alloc_vec_any(nwithkey);
- jl_array_ptr_1d_push(roots, (jl_value_t*)newroots);
- rle_iter_state rootiter = rle_iter_init(0);
- uint64_t *rletable = NULL;
- size_t nblocks2 = 0, nroots = jl_array_nrows(m->roots), k = 0;
- if (m->root_blocks) {
- rletable = jl_array_data(m->root_blocks, uint64_t);
- nblocks2 = jl_array_nrows(m->root_blocks);
- }
- while (rle_iter_increment(&rootiter, nroots, rletable, nblocks2))
- if (rootiter.key == key)
- jl_array_ptr_set(newroots, k++, jl_array_ptr_ref(m->roots, rootiter.i));
- assert(k == nwithkey);
- }
- }
- }
- JL_GC_POP();
- htable_free(&mset);
-}
-
-
// For every method:
// - if the method is owned by a worklist module, add it to the list of things to be
// verified on reloading
@@ -707,9 +706,7 @@ static void jl_activate_methods(jl_array_t *external, jl_array_t *internal, size
else if (jl_is_method(obj)) {
jl_method_t *m = (jl_method_t*)obj;
assert(jl_atomic_load_relaxed(&m->primary_world) == ~(size_t)0);
- assert(jl_atomic_load_relaxed(&m->deleted_world) == WORLD_AGE_REVALIDATION_SENTINEL);
jl_atomic_store_release(&m->primary_world, world);
- jl_atomic_store_release(&m->deleted_world, ~(size_t)0);
}
else if (jl_is_code_instance(obj)) {
jl_code_instance_t *ci = (jl_code_instance_t*)obj;
@@ -737,17 +734,31 @@ static void jl_activate_methods(jl_array_t *external, jl_array_t *internal, size
}
}
-static void jl_copy_roots(jl_array_t *method_roots_list, uint64_t key)
+static int jl_copy_roots(jl_array_t *method_roots_list, uint64_t key)
{
size_t i, l = jl_array_nrows(method_roots_list);
+ int failed = 0;
for (i = 0; i < l; i+=2) {
jl_method_t *m = (jl_method_t*)jl_array_ptr_ref(method_roots_list, i);
jl_array_t *roots = (jl_array_t*)jl_array_ptr_ref(method_roots_list, i+1);
if (roots) {
assert(jl_is_array(roots));
+ if (m->root_blocks) {
+ // check for key collision
+ uint64_t *blocks = jl_array_data(m->root_blocks, uint64_t);
+ size_t nx2 = jl_array_nrows(m->root_blocks);
+ for (size_t i = 0; i < nx2; i+=2) {
+ if (blocks[i] == key) {
+ // found duplicate block
+ failed = -1;
+ }
+ }
+ }
+
jl_append_method_roots(m, key, roots);
}
}
+ return failed;
}
static jl_value_t *read_verify_mod_list(ios_t *s, jl_array_t *depmods)
diff --git a/src/subtype.c b/src/subtype.c
index a0b7bff4006ce..3d50dc492e190 100644
--- a/src/subtype.c
+++ b/src/subtype.c
@@ -139,7 +139,7 @@ static jl_varbinding_t *lookup(jl_stenv_t *e, jl_tvar_t *v) JL_GLOBALLY_ROOTED J
static int statestack_get(jl_unionstate_t *st, int i) JL_NOTSAFEPOINT
{
- assert(i >= 0 && i <= 32767); // limited by the depth bit.
+ assert(i >= 0 && i < 32767); // limited by the depth bit.
// get the `i`th bit in an array of 32-bit words
jl_bits_stack_t *stack = &st->stack;
while (i >= sizeof(stack->data) * 8) {
@@ -153,7 +153,7 @@ static int statestack_get(jl_unionstate_t *st, int i) JL_NOTSAFEPOINT
static void statestack_set(jl_unionstate_t *st, int i, int val) JL_NOTSAFEPOINT
{
- assert(i >= 0 && i <= 32767); // limited by the depth bit.
+ assert(i >= 0 && i < 32767); // limited by the depth bit.
jl_bits_stack_t *stack = &st->stack;
while (i >= sizeof(stack->data) * 8) {
if (__unlikely(stack->next == NULL)) {
@@ -1069,7 +1069,8 @@ static int forall_exists_equal(jl_value_t *x, jl_value_t *y, jl_stenv_t *e);
static int subtype_tuple_varargs(
jl_vararg_t *vtx, jl_vararg_t *vty,
- size_t vx, size_t vy,
+ jl_value_t *lastx, jl_value_t *lasty,
+ size_t vx, size_t vy, size_t x_reps,
jl_stenv_t *e, int param)
{
jl_value_t *xp0 = jl_unwrap_vararg(vtx); jl_value_t *xp1 = jl_unwrap_vararg_num(vtx);
@@ -1111,12 +1112,30 @@ static int subtype_tuple_varargs(
}
}
}
-
- // in Vararg{T1} <: Vararg{T2}, need to check subtype twice to
- // simulate the possibility of multiple arguments, which is needed
- // to implement the diagonal rule correctly.
- if (!subtype(xp0, yp0, e, param)) return 0;
- if (!subtype(xp0, yp0, e, 1)) return 0;
+ int x_same = vx > 1 || (lastx && obviously_egal(xp0, lastx));
+ int y_same = vy > 1 || (lasty && obviously_egal(yp0, lasty));
+ // keep track of number of consecutive identical subtyping
+ x_reps = y_same && x_same ? x_reps + 1 : 1;
+ if (x_reps > 2) {
+ // an identical type on the left doesn't need to be compared to the same
+ // element type on the right more than twice.
+ }
+ else if (x_same && e->Runions.depth == 0 && y_same &&
+ !jl_has_free_typevars(xp0) && !jl_has_free_typevars(yp0)) {
+ // fast path for repeated elements
+ }
+ else if ((e->Runions.depth == 0 ? !jl_has_free_typevars(xp0) : jl_is_concrete_type(xp0)) && !jl_has_free_typevars(yp0)) {
+ // fast path for separable sub-formulas
+ if (!jl_subtype(xp0, yp0))
+ return 0;
+ }
+ else {
+ // in Vararg{T1} <: Vararg{T2}, need to check subtype twice to
+ // simulate the possibility of multiple arguments, which is needed
+ // to implement the diagonal rule correctly.
+ if (!subtype(xp0, yp0, e, param)) return 0;
+ if (x_reps < 2 && !subtype(xp0, yp0, e, 1)) return 0;
+ }
constrain_length:
if (!yp1) {
@@ -1246,7 +1265,8 @@ static int subtype_tuple_tail(jl_datatype_t *xd, jl_datatype_t *yd, int8_t R, jl
return subtype_tuple_varargs(
(jl_vararg_t*)xi,
(jl_vararg_t*)yi,
- vx, vy, e, param);
+ lastx, lasty,
+ vx, vy, x_reps, e, param);
}
if (j >= ly)
@@ -1267,7 +1287,7 @@ static int subtype_tuple_tail(jl_datatype_t *xd, jl_datatype_t *yd, int8_t R, jl
(yi == lastx && !vx && vy && jl_is_concrete_type(xi)))) {
// fast path for repeated elements
}
- else if (e->Runions.depth == 0 && !jl_has_free_typevars(xi) && !jl_has_free_typevars(yi)) {
+ else if ((e->Runions.depth == 0 ? !jl_has_free_typevars(xi) : jl_is_concrete_type(xi)) && !jl_has_free_typevars(yi)) {
// fast path for separable sub-formulas
if (!jl_subtype(xi, yi))
return 0;
@@ -1393,7 +1413,7 @@ static int subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param)
x = pick_union_element(x, e, 0);
}
if (jl_is_uniontype(y)) {
- if (x == ((jl_uniontype_t*)y)->a || x == ((jl_uniontype_t*)y)->b)
+ if (obviously_in_union(y, x))
return 1;
if (jl_is_unionall(x))
return subtype_unionall(y, (jl_unionall_t*)x, e, 0, param);
@@ -1448,11 +1468,14 @@ static int subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param)
}
if (jl_is_unionall(y)) {
jl_varbinding_t *xb = lookup(e, (jl_tvar_t*)x);
- if (xb == NULL ? !e->ignore_free : !xb->right) {
+ jl_value_t *xub = xb == NULL ? ((jl_tvar_t *)x)->ub : xb->ub;
+ if ((xb == NULL ? !e->ignore_free : !xb->right) && xub != y) {
// We'd better unwrap `y::UnionAll` eagerly if `x` isa ∀-var.
// This makes sure the following cases work correct:
// 1) `∀T <: Union{∃S, SomeType{P}} where {P}`: `S == Any` ==> `S >: T`
// 2) `∀T <: Union{∀T, SomeType{P}} where {P}`:
+ // note: if xub == y we'd better try `subtype_var` as `subtype_left_var`
+ // hit `==` based fast path.
return subtype_unionall(x, (jl_unionall_t*)y, e, 1, param);
}
}
@@ -1590,6 +1613,8 @@ static int has_exists_typevar(jl_value_t *x, jl_stenv_t *e) JL_NOTSAFEPOINT
return env != NULL && jl_has_bound_typevars(x, env);
}
+static int forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param);
+
static int local_forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param, int limit_slow)
{
int16_t oldRmore = e->Runions.more;
@@ -1603,7 +1628,18 @@ static int local_forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t
return jl_subtype(x, y);
int has_exists = (!kindx && has_exists_typevar(x, e)) ||
(!kindy && has_exists_typevar(y, e));
- if (has_exists && (is_exists_typevar(x, e) != is_exists_typevar(y, e))) {
+ if (!has_exists) {
+ // We can use ∀_∃_subtype safely for ∃ free inputs.
+ // This helps to save some bits in union stack.
+ jl_saved_unionstate_t oldRunions; push_unionstate(&oldRunions, &e->Runions);
+ e->Lunions.used = e->Runions.used = 0;
+ e->Lunions.depth = e->Runions.depth = 0;
+ e->Lunions.more = e->Runions.more = 0;
+ sub = forall_exists_subtype(x, y, e, param);
+ pop_unionstate(&e->Runions, &oldRunions);
+ return sub;
+ }
+ if (is_exists_typevar(x, e) != is_exists_typevar(y, e)) {
e->Lunions.used = 0;
while (1) {
e->Lunions.more = 0;
@@ -1617,7 +1653,7 @@ static int local_forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t
if (limit_slow == -1)
limit_slow = kindx || kindy;
jl_savedenv_t se;
- save_env(e, &se, has_exists);
+ save_env(e, &se, 1);
int count, limited = 0, ini_count = 0;
jl_saved_unionstate_t latestLunions = {0, 0, 0, NULL};
while (1) {
@@ -1635,13 +1671,13 @@ static int local_forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t
limited = 1;
if (!sub || !next_union_state(e, 0))
break;
- if (limited || !has_exists || e->Runions.more == oldRmore) {
+ if (limited || e->Runions.more == oldRmore) {
// re-save env and freeze the ∃decision for previous ∀Union
// Note: We could ignore the rest `∃Union` decisions if `x` and `y`
// contain no ∃ typevar, as they have no effect on env.
ini_count = count;
push_unionstate(&latestLunions, &e->Lunions);
- re_save_env(e, &se, has_exists);
+ re_save_env(e, &se, 1);
e->Runions.more = oldRmore;
}
}
@@ -1649,12 +1685,12 @@ static int local_forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t
break;
assert(e->Runions.more > oldRmore);
next_union_state(e, 1);
- restore_env(e, &se, has_exists); // also restore Rdepth here
+ restore_env(e, &se, 1); // also restore Rdepth here
e->Runions.more = oldRmore;
}
if (!sub)
assert(e->Runions.more == oldRmore);
- else if (limited || !has_exists)
+ else if (limited)
e->Runions.more = oldRmore;
free_env(&se);
return sub;
@@ -2539,9 +2575,6 @@ static jl_value_t *intersect_aside(jl_value_t *x, jl_value_t *y, jl_stenv_t *e,
static jl_value_t *intersect_union(jl_value_t *x, jl_uniontype_t *u, jl_stenv_t *e, int8_t R, int param)
{
- // band-aid for #56040
- if (!jl_is_uniontype(x) && obviously_in_union((jl_value_t *)u, x))
- return x;
int no_free = !jl_has_free_typevars(x) && !jl_has_free_typevars((jl_value_t*)u);
if (param == 2 || no_free) {
jl_value_t *a=NULL, *b=NULL;
@@ -2678,31 +2711,22 @@ static void set_bound(jl_value_t **bound, jl_value_t *val, jl_tvar_t *v, jl_sten
// subtype, treating all vars as existential
static int subtype_in_env_existential(jl_value_t *x, jl_value_t *y, jl_stenv_t *e)
{
- jl_varbinding_t *v = e->vars;
- int len = 0;
- if (x == jl_bottom_type || y == (jl_value_t*)jl_any_type)
+ if (x == jl_bottom_type || y == (jl_value_t*)jl_any_type || obviously_in_union(y, x))
return 1;
- while (v != NULL) {
- len++;
- v = v->prev;
- }
- int8_t *rs = (int8_t*)malloc_s(len);
+ int8_t *rs = (int8_t*)alloca(current_env_length(e));
+ jl_varbinding_t *v = e->vars;
int n = 0;
- v = e->vars;
- while (n < len) {
- assert(v != NULL);
+ while (v != NULL) {
rs[n++] = v->right;
v->right = 1;
v = v->prev;
}
int issub = subtype_in_env(x, y, e);
n = 0; v = e->vars;
- while (n < len) {
- assert(v != NULL);
+ while (v != NULL) {
v->right = rs[n++];
v = v->prev;
}
- free(rs);
return issub;
}
@@ -2750,6 +2774,8 @@ static int check_unsat_bound(jl_value_t *t, jl_tvar_t *v, jl_stenv_t *e) JL_NOTS
}
+static int intersect_var_ccheck_in_env(jl_value_t *xlb, jl_value_t *xub, jl_value_t *ylb, jl_value_t *yub, jl_stenv_t *e, int flip);
+
static jl_value_t *intersect_var(jl_tvar_t *b, jl_value_t *a, jl_stenv_t *e, int8_t R, int param)
{
jl_varbinding_t *bb = lookup(e, b);
@@ -2761,20 +2787,14 @@ static jl_value_t *intersect_var(jl_tvar_t *b, jl_value_t *a, jl_stenv_t *e, int
return R ? intersect(a, bb->lb, e, param) : intersect(bb->lb, a, e, param);
if (!jl_is_type(a) && !jl_is_typevar(a))
return set_var_to_const(bb, a, e, R);
- jl_savedenv_t se;
if (param == 2) {
jl_value_t *ub = NULL;
JL_GC_PUSH1(&ub);
if (!jl_has_free_typevars(a)) {
- save_env(e, &se, 1);
- int issub = subtype_in_env_existential(bb->lb, a, e);
- restore_env(e, &se, 1);
- if (issub) {
- issub = subtype_in_env_existential(a, bb->ub, e);
- restore_env(e, &se, 1);
- }
- free_env(&se);
- if (!issub) {
+ if (R) flip_offset(e);
+ int ccheck = intersect_var_ccheck_in_env(bb->lb, bb->ub, a, a, e, !R);
+ if (R) flip_offset(e);
+ if (!ccheck) {
JL_GC_POP();
return jl_bottom_type;
}
@@ -2784,6 +2804,7 @@ static jl_value_t *intersect_var(jl_tvar_t *b, jl_value_t *a, jl_stenv_t *e, int
e->triangular++;
ub = R ? intersect_aside(a, bb->ub, e, bb->depth0) : intersect_aside(bb->ub, a, e, bb->depth0);
e->triangular--;
+ jl_savedenv_t se;
save_env(e, &se, 1);
int issub = subtype_in_env_existential(bb->lb, ub, e);
restore_env(e, &se, 1);
@@ -2813,7 +2834,7 @@ static jl_value_t *intersect_var(jl_tvar_t *b, jl_value_t *a, jl_stenv_t *e, int
jl_value_t *ub = R ? intersect_aside(a, bb->ub, e, bb->depth0) : intersect_aside(bb->ub, a, e, bb->depth0);
if (ub == jl_bottom_type)
return jl_bottom_type;
- if (bb->constraintkind == 1 || e->triangular) {
+ if (bb->constraintkind == 1 || (e->triangular && param == 1)) {
if (e->triangular && check_unsat_bound(ub, b, e))
return jl_bottom_type;
set_bound(&bb->ub, ub, b, e);
@@ -3856,6 +3877,89 @@ static int subtype_by_bounds(jl_value_t *x, jl_value_t *y, jl_stenv_t *e) JL_NOT
return compareto_var(x, (jl_tvar_t*)y, e, -1) || compareto_var(y, (jl_tvar_t*)x, e, 1);
}
+static int intersect_var_ccheck_in_env(jl_value_t *xlb, jl_value_t *xub, jl_value_t *ylb, jl_value_t *yub, jl_stenv_t *e, int flip)
+{
+ int easy_check1 = xlb == jl_bottom_type ||
+ yub == (jl_value_t *)jl_any_type ||
+ (e->Loffset == 0 && obviously_in_union(yub, xlb));
+ int easy_check2 = ylb == jl_bottom_type ||
+ xub == (jl_value_t *)jl_any_type ||
+ (e->Loffset == 0 && obviously_in_union(xub, ylb));
+ int nofree1 = 0, nofree2 = 0;
+ if (!easy_check1) {
+ nofree1 = !jl_has_free_typevars(xlb) && !jl_has_free_typevars(yub);
+ if (nofree1 && e->Loffset == 0) {
+ easy_check1 = jl_subtype(xlb, yub);
+ if (!easy_check1)
+ return 0;
+ }
+ }
+ if (!easy_check2) {
+ nofree2 = !jl_has_free_typevars(ylb) && !jl_has_free_typevars(xub);
+ if (nofree2 && e->Loffset == 0) {
+ easy_check2 = jl_subtype(ylb, xub);
+ if (!easy_check2)
+ return 0;
+ }
+ }
+ if (easy_check1 && easy_check2)
+ return 1;
+ int ccheck = 0;
+ if ((easy_check1 || nofree1) && (easy_check2 || nofree2)) {
+ jl_varbinding_t *vars = e->vars;
+ e->vars = NULL;
+ ccheck = easy_check1 || subtype_in_env(xlb, yub, e);
+ if (ccheck && !easy_check2) {
+ flip_offset(e);
+ ccheck = subtype_in_env(ylb, xub, e);
+ flip_offset(e);
+ }
+ e->vars = vars;
+ return ccheck;
+ }
+ jl_savedenv_t se;
+ save_env(e, &se, 1);
+ // first try normal flip.
+ if (flip) flip_vars(e);
+ ccheck = easy_check1 || subtype_in_env(xlb, yub, e);
+ if (ccheck && !easy_check2) {
+ flip_offset(e);
+ ccheck = subtype_in_env(ylb, xub, e);
+ flip_offset(e);
+ }
+ if (flip) flip_vars(e);
+ if (!ccheck) {
+ // then try reverse flip.
+ restore_env(e, &se, 1);
+ if (!flip) flip_vars(e);
+ ccheck = easy_check1 || subtype_in_env(xlb, yub, e);
+ if (ccheck && !easy_check2) {
+ flip_offset(e);
+ ccheck = subtype_in_env(ylb, xub, e);
+ flip_offset(e);
+ }
+ if (!flip) flip_vars(e);
+ }
+ if (!ccheck) {
+ // then try existential.
+ restore_env(e, &se, 1);
+ if (easy_check1)
+ ccheck = 1;
+ else {
+ ccheck = subtype_in_env_existential(xlb, yub, e);
+ restore_env(e, &se, 1);
+ }
+ if (ccheck && !easy_check2) {
+ flip_offset(e);
+ ccheck = subtype_in_env_existential(ylb, xub, e);
+ flip_offset(e);
+ restore_env(e, &se, 1);
+ }
+ }
+ free_env(&se);
+ return ccheck;
+}
+
static int has_typevar_via_env(jl_value_t *x, jl_tvar_t *t, jl_stenv_t *e)
{
if (e->Loffset == 0) {
@@ -3988,14 +4092,8 @@ static jl_value_t *intersect(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int pa
ccheck = 1;
}
else {
- if (R) flip_vars(e);
- ccheck = subtype_in_env(xlb, yub, e);
- if (ccheck) {
- flip_offset(e);
- ccheck = subtype_in_env(ylb, xub, e);
- flip_offset(e);
- }
- if (R) flip_vars(e);
+ // try many subtype check to avoid false `Union{}`
+ ccheck = intersect_var_ccheck_in_env(xlb, xub, ylb, yub, e, R);
}
if (R) flip_offset(e);
if (!ccheck)
@@ -4051,12 +4149,14 @@ static jl_value_t *intersect(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int pa
if (jl_subtype(y, x)) return y;
}
if (jl_is_uniontype(x)) {
- if (y == ((jl_uniontype_t*)x)->a || y == ((jl_uniontype_t*)x)->b)
+ if (obviously_in_union(x, y))
return y;
+ if (jl_is_uniontype(y) && obviously_in_union(y, x))
+ return x;
return intersect_union(y, (jl_uniontype_t*)x, e, 0, param);
}
if (jl_is_uniontype(y)) {
- if (x == ((jl_uniontype_t*)y)->a || x == ((jl_uniontype_t*)y)->b)
+ if (obviously_in_union(y, x))
return x;
if (jl_is_unionall(x) && (jl_has_free_typevars(x) || jl_has_free_typevars(y)))
return intersect_unionall(y, (jl_unionall_t*)x, e, 0, param);
diff --git a/src/support/Makefile b/src/support/Makefile
index 1ee98a4eabdee..c7de154058586 100644
--- a/src/support/Makefile
+++ b/src/support/Makefile
@@ -48,10 +48,10 @@ $(BUILDDIR)/%.dbg.obj: $(SRCDIR)/%.S | $(BUILDDIR)
$(BUILDDIR)/host/Makefile:
mkdir -p $(BUILDDIR)/host
@# add Makefiles to the build directories for convenience (pointing back to the source location of each)
- @echo '# -- This file is automatically generated in julia/Makefile -- #' > $@
- @echo 'BUILDDIR=$(BUILDDIR)/host' >> $@
- @echo 'BUILDING_HOST_TOOLS=1' >> $@
- @echo 'include $(SRCDIR)/Makefile' >> $@
+ @printf "%s\n" '# -- This file is automatically generated in julia/Makefile -- #' > $@
+ @printf "%s\n" 'BUILDDIR=$(BUILDDIR)/host' >> $@
+ @printf "%s\n" 'BUILDING_HOST_TOOLS=1' >> $@
+ @printf "%s\n" 'include $(SRCDIR)/Makefile' >> $@
release: $(BUILDDIR)/libsupport.a
debug: $(BUILDDIR)/libsupport-debug.a
diff --git a/src/threading.c b/src/threading.c
index a51916cdcd8d8..90cbe39dd5b30 100644
--- a/src/threading.c
+++ b/src/threading.c
@@ -1,11 +1,9 @@
// This file is a part of Julia. License is MIT: https://julialang.org/license
-
#include
#include
#include
#include
#include
-
#include "julia.h"
#include "julia_internal.h"
#include "julia_assert.h"
@@ -228,10 +226,6 @@ void jl_set_pgcstack(jl_gcframe_t **pgcstack) JL_NOTSAFEPOINT
{
*jl_pgcstack_key() = pgcstack;
}
-# if JL_USE_IFUNC
-JL_DLLEXPORT __attribute__((weak))
-void jl_register_pgcstack_getter(void);
-# endif
static jl_gcframe_t **jl_get_pgcstack_init(void);
static jl_get_pgcstack_func *jl_get_pgcstack_cb = jl_get_pgcstack_init;
static jl_gcframe_t **jl_get_pgcstack_init(void)
@@ -244,15 +238,8 @@ static jl_gcframe_t **jl_get_pgcstack_init(void)
// This is clearly not thread-safe but should be fine since we
// make sure the tls states callback is finalized before adding
// multiple threads
-# if JL_USE_IFUNC
- if (jl_register_pgcstack_getter)
- jl_register_pgcstack_getter();
- else
-# endif
- {
- jl_get_pgcstack_cb = jl_get_pgcstack_fallback;
- jl_pgcstack_key = &jl_pgcstack_addr_fallback;
- }
+ jl_get_pgcstack_cb = jl_get_pgcstack_fallback;
+ jl_pgcstack_key = &jl_pgcstack_addr_fallback;
return jl_get_pgcstack_cb();
}
@@ -336,7 +323,17 @@ jl_ptls_t jl_init_threadtls(int16_t tid)
#endif
if (jl_get_pgcstack() != NULL)
abort();
- jl_ptls_t ptls = (jl_ptls_t)calloc(1, sizeof(jl_tls_states_t));
+ jl_ptls_t ptls;
+#if defined(_OS_WINDOWS_)
+ ptls = _aligned_malloc(sizeof(jl_tls_states_t), alignof(jl_tls_states_t));
+ if (ptls == NULL)
+ abort();
+#else
+ if (posix_memalign((void**)&ptls, alignof(jl_tls_states_t), sizeof(jl_tls_states_t)))
+ abort();
+#endif
+ memset(ptls, 0, sizeof(jl_tls_states_t));
+
#ifndef _OS_WINDOWS_
pthread_setspecific(jl_task_exit_key, (void*)ptls);
#endif
@@ -424,7 +421,6 @@ static void jl_init_task_lock(jl_task_t *ct)
}
}
-
JL_DLLEXPORT jl_gcframe_t **jl_adopt_thread(void)
{
// `jl_init_threadtls` puts us in a GC unsafe region, so ensure GC isn't running.
@@ -450,6 +446,22 @@ JL_DLLEXPORT jl_gcframe_t **jl_adopt_thread(void)
return &ct->gcstack;
}
+JL_DLLEXPORT jl_gcframe_t **jl_autoinit_and_adopt_thread(void)
+{
+ if (!jl_is_initialized()) {
+ void *retaddr = __builtin_extract_return_addr(__builtin_return_address(0));
+ void *handle = jl_find_dynamic_library_by_addr(retaddr, 0);
+ if (handle == NULL) {
+ fprintf(stderr, "error: runtime auto-initialization failed due to bad sysimage lookup\n"
+ " (this should not happen, please file a bug report)\n");
+ exit(1);
+ }
+ jl_init_with_image_handle(handle);
+ return &jl_get_current_task()->gcstack;
+ }
+
+ return jl_adopt_thread();
+}
void jl_safepoint_suspend_all_threads(jl_task_t *ct)
{
@@ -1088,6 +1100,289 @@ JL_DLLEXPORT int jl_setaffinity(int16_t tid, char *mask, int cpumasksize) {
return 0; // success
}
+// Heartbeat mechanism for Julia's task scheduler
+// ---
+// Start a thread that does not participate in running Julia's tasks. This
+// thread simply sleeps until the heartbeat mechanism is enabled. When
+// enabled, the heartbeat thread enters a loop in which it blocks waiting
+// for the specified heartbeat interval. If, within that interval,
+// `jl_heartbeat()` is *not* called at least once, then the thread calls
+// `jl_print_task_backtraces(0)`.
+
+#ifdef JL_HEARTBEAT_THREAD
+
+#include
+
+volatile int heartbeat_enabled;
+int heartbeat_tid; // Mostly used to ensure we skip this thread in the CPU profiler. XXX: not implemented on Windows
+uv_thread_t heartbeat_uvtid;
+uv_sem_t heartbeat_on_sem, // jl_heartbeat_enable -> thread
+ heartbeat_off_sem; // thread -> jl_heartbeat_enable
+int heartbeat_interval_s,
+ tasks_after_n,
+ reset_tasks_after_n;
+int tasks_showed, n_hbs_missed, n_hbs_recvd;
+_Atomic(int) heartbeats;
+
+JL_DLLEXPORT void jl_print_task_backtraces(int show_done) JL_NOTSAFEPOINT;
+void jl_heartbeat_threadfun(void *arg);
+
+// start the heartbeat thread with heartbeats disabled
+void jl_init_heartbeat(void)
+{
+ heartbeat_enabled = 0;
+ uv_sem_init(&heartbeat_on_sem, 0);
+ uv_sem_init(&heartbeat_off_sem, 0);
+ uv_thread_create(&heartbeat_uvtid, jl_heartbeat_threadfun, NULL);
+ uv_thread_detach(&heartbeat_uvtid);
+}
+
+int jl_inside_heartbeat_thread(void)
+{
+ uv_thread_t curr_uvtid = uv_thread_self();
+ return curr_uvtid == heartbeat_uvtid;
+}
+
+// enable/disable heartbeats
+// heartbeat_s: interval within which jl_heartbeat() must be called
+// show_tasks_after_n: number of heartbeats missed before printing task backtraces
+// reset_after_n: number of heartbeats after which to reset
+//
+// When disabling heartbeats, the heartbeat thread must wake up,
+// find out that heartbeats are now disabled, and reset. For now, we
+// handle this by preventing re-enabling of heartbeats until this
+// completes.
+JL_DLLEXPORT int jl_heartbeat_enable(int heartbeat_s, int show_tasks_after_n,
+ int reset_after_n)
+{
+ if (heartbeat_s <= 0) {
+ heartbeat_enabled = 0;
+ heartbeat_interval_s = tasks_after_n = reset_tasks_after_n = 0;
+ }
+ else {
+ // must disable before enabling
+ if (heartbeat_enabled) {
+ return -1;
+ }
+ // heartbeat thread must be ready
+ if (uv_sem_trywait(&heartbeat_off_sem) != 0) {
+ return -1;
+ }
+
+ jl_atomic_store_relaxed(&heartbeats, 0);
+ heartbeat_interval_s = heartbeat_s;
+ tasks_after_n = show_tasks_after_n;
+ reset_tasks_after_n = reset_after_n;
+ tasks_showed = 0;
+ n_hbs_missed = 0;
+ n_hbs_recvd = 0;
+ heartbeat_enabled = 1;
+ uv_sem_post(&heartbeat_on_sem); // wake the heartbeat thread
+ }
+ return 0;
+}
+
+// temporarily pause the heartbeat thread
+JL_DLLEXPORT int jl_heartbeat_pause(void)
+{
+ if (!heartbeat_enabled) {
+ return -1;
+ }
+ heartbeat_enabled = 0;
+ return 0;
+}
+
+// resume the paused heartbeat thread
+JL_DLLEXPORT int jl_heartbeat_resume(void)
+{
+ // cannot resume if the heartbeat thread is already running
+ if (heartbeat_enabled) {
+ return -1;
+ }
+
+ // cannot resume if we weren't paused (disabled != paused)
+ if (heartbeat_interval_s == 0) {
+ return -1;
+ }
+
+ // heartbeat thread must be ready
+ if (uv_sem_trywait(&heartbeat_off_sem) != 0) {
+ return -1;
+ }
+
+ // reset state as we've been paused
+ n_hbs_missed = 0;
+ n_hbs_recvd = 0;
+ tasks_showed = 0;
+
+ // resume
+ heartbeat_enabled = 1;
+ uv_sem_post(&heartbeat_on_sem); // wake the heartbeat thread
+ return 0;
+}
+
+// heartbeat
+JL_DLLEXPORT void jl_heartbeat(void)
+{
+ jl_atomic_fetch_add(&heartbeats, 1);
+}
+
+// sleep the thread for the specified interval
+void sleep_for(int secs, int nsecs)
+{
+ struct timespec rqtp, rmtp;
+ rqtp.tv_sec = secs;
+ rqtp.tv_nsec = nsecs;
+ rmtp.tv_sec = 0;
+ rmtp.tv_nsec = 0;
+ for (; ;) {
+ // this suspends the thread so we aren't using CPU
+ if (nanosleep(&rqtp, &rmtp) == 0) {
+ return;
+ }
+ // TODO: else if (errno == EINTR)
+ // this could be SIGTERM and we should shutdown but how to find out?
+ rqtp = rmtp;
+ }
+}
+
+// check for heartbeats and maybe report loss
+uint8_t check_heartbeats(uint8_t gc_state)
+{
+ int hb = jl_atomic_exchange(&heartbeats, 0);
+
+ if (hb <= 0) {
+ // we didn't get a heartbeat
+ n_hbs_recvd = 0;
+ n_hbs_missed++;
+
+ // if we've printed task backtraces already, do nothing
+ if (!tasks_showed) {
+ // otherwise, at least show this message
+ jl_safe_printf("==== heartbeat loss (%ds) ====\n",
+ n_hbs_missed * heartbeat_interval_s);
+ // if we've missed enough heartbeats, print task backtraces
+ if (n_hbs_missed >= tasks_after_n) {
+ jl_task_t *ct = jl_current_task;
+ jl_ptls_t ptls = ct->ptls;
+
+ // exit GC-safe region to report then re-enter
+ jl_gc_safe_leave(ptls, gc_state);
+ jl_print_task_backtraces(0);
+ gc_state = jl_gc_safe_enter(ptls);
+
+ // we printed task backtraces
+ tasks_showed = 1;
+ }
+ }
+ }
+ else {
+ // got a heartbeat
+ n_hbs_recvd++;
+ // if we'd printed task backtraces, check for reset
+ if (tasks_showed && n_hbs_recvd >= reset_tasks_after_n) {
+ tasks_showed = 0;
+ jl_safe_printf("==== heartbeats recovered (lost for %ds) ====\n",
+ n_hbs_missed * heartbeat_interval_s);
+ }
+ n_hbs_missed = 0;
+ }
+
+ return gc_state;
+}
+
+// heartbeat thread function
+void jl_heartbeat_threadfun(void *arg)
+{
+ int s = 59, ns = 1e9 - 1, rs;
+ uint64_t t0, tchb;
+
+ // We need a TLS because backtraces are accumulated into ptls->bt_size
+ // and ptls->bt_data, so we need to call jl_adopt_thread().
+ jl_adopt_thread();
+ (void)jl_atomic_fetch_add_relaxed(&n_threads_running, -1);
+ jl_task_t *ct = jl_current_task;
+ jl_ptls_t ptls = ct->ptls;
+ heartbeat_tid = ptls->tid;
+
+ // Don't hold up GC, this thread doesn't participate.
+ uint8_t gc_state = jl_gc_safe_enter(ptls);
+
+ for (;;) {
+ if (!heartbeat_enabled) {
+ // post the off semaphore to indicate we're ready to enable
+ uv_sem_post(&heartbeat_off_sem);
+
+ // sleep the thread here; this semaphore is posted in
+ // jl_heartbeat_enable() or jl_heartbeat_resume()
+ uv_sem_wait(&heartbeat_on_sem);
+
+ // Set the sleep duration.
+ s = heartbeat_interval_s - 1;
+ ns = 1e9 - 1;
+ continue;
+ }
+
+ // heartbeat is enabled; sleep, waiting for the desired interval
+ sleep_for(s, ns);
+
+ // if heartbeats were turned off/paused while we were sleeping, reset
+ if (!heartbeat_enabled) {
+ continue;
+ }
+
+ // check if any heartbeats have happened, report as appropriate
+ t0 = jl_hrtime();
+ gc_state = check_heartbeats(gc_state);
+ tchb = jl_hrtime() - t0;
+
+ // adjust the next sleep duration based on how long the heartbeat
+ // check took, but if it took too long then use the normal duration
+ rs = 1;
+ while (tchb > 1e9) {
+ rs++;
+ tchb -= 1e9;
+ }
+ if (rs < heartbeat_interval_s) {
+ s = heartbeat_interval_s - rs;
+ }
+ ns = 1e9 - tchb;
+ }
+}
+
+#else // !JL_HEARTBEAT_THREAD
+
+void jl_init_heartbeat(void)
+{
+}
+
+int jl_inside_heartbeat_thread(void)
+{
+ return 0;
+}
+
+JL_DLLEXPORT int jl_heartbeat_enable(int heartbeat_s, int show_tasks_after_n,
+ int reset_after_n)
+{
+ return -1;
+}
+
+JL_DLLEXPORT int jl_heartbeat_pause(void)
+{
+ return -1;
+}
+
+JL_DLLEXPORT int jl_heartbeat_resume(void)
+{
+ return -1;
+}
+
+JL_DLLEXPORT void jl_heartbeat(void)
+{
+}
+
+#endif // JL_HEARTBEAT_THREAD
+
#ifdef __cplusplus
}
#endif
diff --git a/src/toplevel.c b/src/toplevel.c
index cdd390b9b49ed..f1fff694926ba 100644
--- a/src/toplevel.c
+++ b/src/toplevel.c
@@ -34,26 +34,24 @@ htable_t jl_current_modules;
jl_mutex_t jl_modules_mutex;
// During incremental compilation, the following gets set
-JL_DLLEXPORT jl_module_t *jl_precompile_toplevel_module = NULL; // the toplevel module currently being defined
+jl_module_t *jl_precompile_toplevel_module = NULL; // the toplevel module currently being defined
-JL_DLLEXPORT void jl_add_standard_imports(jl_module_t *m)
+jl_module_t *jl_add_standard_imports(jl_module_t *m)
{
jl_module_t *base_module = jl_base_relative_to(m);
assert(base_module != NULL);
// using Base
- jl_module_using(m, base_module);
+ jl_module_initial_using(m, base_module);
+ return base_module;
}
// create a new top-level module
void jl_init_main_module(void)
{
assert(jl_main_module == NULL);
- jl_main_module = jl_new_module(jl_symbol("Main"), NULL);
- jl_main_module->parent = jl_main_module;
- jl_set_const(jl_main_module, jl_symbol("Core"),
- (jl_value_t*)jl_core_module);
- jl_set_const(jl_core_module, jl_symbol("Main"),
- (jl_value_t*)jl_main_module);
+ jl_main_module = jl_new_module_(jl_symbol("Main"), NULL, 0, 1); // baremodule Main; end
+ jl_set_initial_const(jl_core_module, jl_symbol("Main"), (jl_value_t*)jl_main_module, 0); // const Main.Core = Core
+ jl_set_initial_const(jl_main_module, jl_symbol("Core"), (jl_value_t*)jl_core_module, 0); // const Core.Main = Main
}
static jl_function_t *jl_module_get_initializer(jl_module_t *m JL_PROPAGATES_ROOT)
@@ -135,61 +133,17 @@ static jl_value_t *jl_eval_module_expr(jl_module_t *parent_module, jl_expr_t *ex
}
int is_parent__toplevel__ = jl_is__toplevel__mod(parent_module);
- jl_module_t *newm = jl_new_module(name, is_parent__toplevel__ ? NULL : parent_module);
+ // If we have `Base`, don't also try to import `Core` - the `Base` exports are a superset.
+ // While we allow multiple imports of the same binding from different modules, various error printing
+ // performs reflection on which module a binding came from and we'd prefer users see "Base" here.
+ jl_module_t *newm = jl_new_module_(name, is_parent__toplevel__ ? NULL : parent_module, std_imports && jl_base_module != NULL ? 0 : 1, 1);
jl_value_t *form = (jl_value_t*)newm;
JL_GC_PUSH1(&form);
JL_LOCK(&jl_modules_mutex);
ptrhash_put(&jl_current_modules, (void*)newm, (void*)((uintptr_t)HT_NOTFOUND + 1));
JL_UNLOCK(&jl_modules_mutex);
-
- jl_module_t *old_toplevel_module = jl_precompile_toplevel_module;
-
// copy parent environment info into submodule
newm->uuid = parent_module->uuid;
- if (is_parent__toplevel__) {
- newm->parent = newm;
- jl_register_root_module(newm);
- if (jl_options.incremental) {
- jl_precompile_toplevel_module = newm;
- }
- }
- else {
- jl_binding_t *b = jl_get_module_binding(parent_module, name, 1);
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, ct->world_age);
- jl_ptr_kind_union_t pku = encode_restriction(NULL, BINDING_KIND_UNDEF_CONST);
- jl_ptr_kind_union_t new_pku = encode_restriction((jl_value_t*)newm, BINDING_KIND_CONST);
- if (!jl_atomic_cmpswap(&bpart->restriction, &pku, new_pku)) {
- if (decode_restriction_kind(pku) != BINDING_KIND_CONST) {
- jl_declare_constant_val(b, parent_module, name, (jl_value_t*)newm);
- } else {
- // As a special exception allow binding replacement of modules
- if (!jl_is_module(decode_restriction_value(pku))) {
- jl_errorf("invalid redefinition of constant %s", jl_symbol_name(name));
- }
- if (jl_generating_output())
- jl_errorf("cannot replace module %s during compilation", jl_symbol_name(name));
- jl_printf(JL_STDERR, "WARNING: replacing module %s.\n", jl_symbol_name(name));
- pku = jl_atomic_exchange(&bpart->restriction, new_pku);
- }
- jl_gc_wb(bpart, newm);
- if (decode_restriction_value(pku) != NULL && jl_is_module(decode_restriction_value(pku))) {
- // create a hidden gc root for the old module
- JL_LOCK(&jl_modules_mutex);
- uintptr_t *refcnt = (uintptr_t*)ptrhash_bp(&jl_current_modules, decode_restriction_value(pku));
- *refcnt += 1;
- JL_UNLOCK(&jl_modules_mutex);
- }
- }
- }
-
- if (parent_module == jl_main_module && name == jl_symbol("Base")) {
- // pick up Base module during bootstrap
- jl_base_module = newm;
- }
-
- size_t last_age = ct->world_age;
-
- // add standard imports unless baremodule
jl_array_t *exprs = ((jl_expr_t*)jl_exprarg(ex, 2))->args;
int lineno = 0;
const char *filename = "none";
@@ -202,25 +156,42 @@ static jl_value_t *jl_eval_module_expr(jl_module_t *parent_module, jl_expr_t *ex
filename = jl_symbol_name((jl_sym_t*)file);
}
}
- if (std_imports) {
- if (jl_base_module != NULL) {
- jl_add_standard_imports(newm);
- jl_datatype_t *include_into = (jl_datatype_t *)jl_get_global(jl_base_module, jl_symbol("IncludeInto"));
- if (include_into) {
- form = jl_new_struct(include_into, newm);
- jl_set_const(newm, jl_symbol("include"), form);
- }
+ newm->file = jl_symbol(filename);
+ jl_gc_wb_knownold(newm, newm->file);
+ newm->line = lineno;
+
+ // add standard imports unless baremodule
+ if (std_imports && jl_base_module != NULL) {
+ jl_module_t *base = jl_add_standard_imports(newm);
+ jl_datatype_t *include_into = (jl_datatype_t *)jl_get_global(base, jl_symbol("IncludeInto"));
+ if (include_into) {
+ form = jl_new_struct(include_into, newm);
+ jl_set_initial_const(newm, jl_symbol("include"), form, 0);
}
jl_datatype_t *eval_into = (jl_datatype_t *)jl_get_global(jl_core_module, jl_symbol("EvalInto"));
if (eval_into) {
form = jl_new_struct(eval_into, newm);
- jl_set_const(newm, jl_symbol("eval"), form);
+ jl_set_initial_const(newm, jl_symbol("eval"), form, 0);
}
}
- newm->file = jl_symbol(filename);
- jl_gc_wb_knownold(newm, newm->file);
- newm->line = lineno;
+ jl_module_t *old_toplevel_module = jl_precompile_toplevel_module;
+ size_t last_age = ct->world_age;
+
+ if (parent_module == jl_main_module && name == jl_symbol("Base") && jl_base_module == NULL) {
+ // pick up Base module during bootstrap
+ jl_base_module = newm;
+ }
+
+ if (is_parent__toplevel__) {
+ jl_register_root_module(newm);
+ if (jl_options.incremental) {
+ jl_precompile_toplevel_module = newm;
+ }
+ }
+ else {
+ jl_declare_constant_val(NULL, parent_module, name, (jl_value_t*)newm);
+ }
for (int i = 0; i < jl_array_nrows(exprs); i++) {
// process toplevel form
@@ -303,7 +274,7 @@ static jl_value_t *jl_eval_dot_expr(jl_module_t *m, jl_value_t *x, jl_value_t *f
}
extern void check_safe_newbinding(jl_module_t *m, jl_sym_t *var);
-void jl_declare_global(jl_module_t *m, jl_value_t *arg, jl_value_t *set_type) {
+void jl_declare_global(jl_module_t *m, jl_value_t *arg, jl_value_t *set_type, int strong) {
// create uninitialized mutable binding for "global x" decl sometimes or probably
jl_module_t *gm;
jl_sym_t *gs;
@@ -321,42 +292,56 @@ void jl_declare_global(jl_module_t *m, jl_value_t *arg, jl_value_t *set_type) {
size_t new_world = jl_atomic_load_relaxed(&jl_world_counter) + 1;
jl_binding_t *b = jl_get_module_binding(gm, gs, 1);
jl_binding_partition_t *bpart = NULL;
- jl_ptr_kind_union_t new_pku = encode_restriction(set_type, set_type == NULL ? BINDING_KIND_DECLARED : BINDING_KIND_GLOBAL);
+ if (!strong && set_type)
+ jl_error("Weak global definitions cannot have types");
+ enum jl_partition_kind new_kind = strong ? PARTITION_KIND_GLOBAL : PARTITION_KIND_DECLARED;
+ jl_value_t *global_type = set_type;
+ if (strong && !global_type)
+ global_type = (jl_value_t*)jl_any_type;
while (1) {
bpart = jl_get_binding_partition(b, new_world);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- if (decode_restriction_kind(pku) != BINDING_KIND_GLOBAL) {
- if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) {
- if (decode_restriction_kind(pku) == BINDING_KIND_DECLARED && !set_type)
- goto done;
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ if (kind != PARTITION_KIND_GLOBAL) {
+ if (jl_bkind_is_some_implicit(kind) || kind == PARTITION_KIND_DECLARED) {
+ if (kind == new_kind) {
+ if (!set_type)
+ goto done;
+ goto check_type;
+ }
check_safe_newbinding(gm, gs);
- if (jl_atomic_cmpswap(&bpart->restriction, &pku, new_pku)) {
- break;
+ if (jl_atomic_load_relaxed(&bpart->min_world) == new_world) {
+ bpart->kind = new_kind | (bpart->kind & PARTITION_MASK_FLAG);
+ bpart->restriction = global_type;
+ if (global_type)
+ jl_gc_wb(bpart, global_type);
+ continue;
+ } else {
+ jl_replace_binding_locked(b, bpart, global_type, new_kind, new_world);
}
- continue;
+ break;
} else if (set_type) {
- if (jl_bkind_is_some_constant(decode_restriction_kind(pku))) {
- jl_errorf("cannot set type for imported constant %s.%s.",
+ if (jl_bkind_is_some_constant(kind)) {
+ jl_errorf("cannot set type for constant %s.%s.",
jl_symbol_name(gm->name), jl_symbol_name(gs));
} else {
- jl_errorf("cannot set type for imported global %s.%s.",
+ jl_errorf("cannot set type for imported binding %s.%s.",
jl_symbol_name(gm->name), jl_symbol_name(gs));
}
}
}
- if (!set_type)
- goto done;
- jl_value_t *old_ty = decode_restriction_value(pku);
- JL_GC_PROMISE_ROOTED(old_ty);
- if (!jl_types_equal(set_type, old_ty)) {
- jl_errorf("cannot set type for global %s.%s. It already has a value or is already set to a different type.",
- jl_symbol_name(gm->name), jl_symbol_name(gs));
+ if (set_type)
+ {
+check_type: ;
+ jl_value_t *old_ty = bpart->restriction;
+ JL_GC_PROMISE_ROOTED(old_ty);
+ if (!jl_types_equal(set_type, old_ty)) {
+ jl_errorf("cannot set type for global %s.%s. It already has a value or is already set to a different type.",
+ jl_symbol_name(gm->name), jl_symbol_name(gs));
+ }
+
}
goto done;
}
- if (set_type)
- jl_gc_wb(bpart, set_type);
- bpart->min_world = new_world;
jl_atomic_store_release(&jl_world_counter, new_world);
done:
JL_UNLOCK(&world_counter_lock);
@@ -367,7 +352,7 @@ void jl_eval_global_expr(jl_module_t *m, jl_expr_t *ex, int set_type)
size_t i, l = jl_array_nrows(ex->args);
for (i = 0; i < l; i++) {
jl_value_t *arg = jl_exprarg(ex, i);
- jl_declare_global(m, arg, NULL);
+ jl_declare_global(m, arg, NULL, 0);
}
}
@@ -431,10 +416,8 @@ static void expr_attributes(jl_value_t *v, jl_array_t *body, int *has_ccall, int
if (jl_is_globalref(f)) {
jl_module_t *mod = jl_globalref_mod(f);
jl_sym_t *name = jl_globalref_name(f);
- if (jl_binding_resolved_p(mod, name)) {
- jl_binding_t *b = jl_get_binding(mod, name);
- called = jl_get_binding_value_if_const(b);
- }
+ jl_binding_t *b = jl_get_binding(mod, name);
+ called = jl_get_binding_value_if_const(b);
}
else if (jl_is_quotenode(f)) {
called = jl_quotenode_value(f);
@@ -443,7 +426,7 @@ static void expr_attributes(jl_value_t *v, jl_array_t *body, int *has_ccall, int
if (jl_is_intrinsic(called) && jl_unbox_int32(called) == (int)llvmcall) {
*has_ccall = 1;
}
- if (called == jl_builtin__typebody) {
+ if (called == jl_builtin__typebody) { // TODO: rely on latestworld instead of function callee detection here (or add it to jl_is_toplevel_only_expr)
*has_defs = 1;
}
}
@@ -496,20 +479,18 @@ static void body_attributes(jl_array_t *body, int *has_ccall, int *has_defs, int
}
extern size_t jl_require_world;
-static jl_module_t *call_require(jl_module_t *mod, jl_sym_t *var) JL_GLOBALLY_ROOTED
+static jl_module_t *call_require(jl_task_t *ct, jl_module_t *mod, jl_sym_t *var) JL_GLOBALLY_ROOTED
{
JL_TIMING(LOAD_IMAGE, LOAD_Require);
jl_timing_printf(JL_TIMING_DEFAULT_BLOCK, "%s", jl_symbol_name(var));
int build_mode = jl_options.incremental && jl_generating_output();
jl_module_t *m = NULL;
- jl_task_t *ct = jl_current_task;
static jl_value_t *require_func = NULL;
if (require_func == NULL && jl_base_module != NULL) {
require_func = jl_get_global(jl_base_module, jl_symbol("require"));
}
if (require_func != NULL) {
- size_t last_age = ct->world_age;
ct->world_age = jl_atomic_load_acquire(&jl_world_counter);
if (build_mode && jl_require_world < ct->world_age)
ct->world_age = jl_require_world;
@@ -518,18 +499,19 @@ static jl_module_t *call_require(jl_module_t *mod, jl_sym_t *var) JL_GLOBALLY_RO
reqargs[1] = (jl_value_t*)mod;
reqargs[2] = (jl_value_t*)var;
m = (jl_module_t*)jl_apply(reqargs, 3);
- ct->world_age = last_age;
}
if (m == NULL || !jl_is_module(m)) {
jl_errorf("failed to load module %s", jl_symbol_name(var));
}
+ ct->world_age = jl_atomic_load_acquire(&jl_world_counter);
return m;
}
// either:
// - sets *name and returns the module to import *name from
// - sets *name to NULL and returns a module to import
-static jl_module_t *eval_import_path(jl_module_t *where, jl_module_t *from JL_PROPAGATES_ROOT,
+// also updates world_age
+static jl_module_t *eval_import_path(jl_task_t *ct, jl_module_t *where, jl_module_t *from JL_PROPAGATES_ROOT,
jl_array_t *args, jl_sym_t **name, const char *keyword) JL_GLOBALLY_ROOTED
{
if (jl_array_nrows(args) == 0)
@@ -554,7 +536,7 @@ static jl_module_t *eval_import_path(jl_module_t *where, jl_module_t *from JL_PR
m = jl_base_module;
}
else {
- m = call_require(where, var);
+ m = call_require(ct, where, var);
}
if (i == jl_array_nrows(args))
return m;
@@ -574,6 +556,8 @@ static jl_module_t *eval_import_path(jl_module_t *where, jl_module_t *from JL_PR
}
}
+ ct->world_age = jl_atomic_load_acquire(&jl_world_counter);
+
while (1) {
var = (jl_sym_t*)jl_array_ptr_ref(args, i);
if (!jl_is_symbol(var))
@@ -658,30 +642,30 @@ JL_DLLEXPORT jl_method_instance_t *jl_method_instance_for_thunk(jl_code_info_t *
return mi;
}
-static void import_module(jl_module_t *JL_NONNULL m, jl_module_t *import, jl_sym_t *asname)
+static void import_module(jl_task_t *ct, jl_module_t *JL_NONNULL m, jl_module_t *import, jl_sym_t *asname)
{
assert(m);
jl_sym_t *name = asname ? asname : import->name;
// TODO: this is a bit race-y with what error message we might print
jl_binding_t *b = jl_get_module_binding(m, name, 1);
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- if (decode_restriction_kind(pku) != BINDING_KIND_GUARD && decode_restriction_kind(pku) != BINDING_KIND_FAILED) {
+ jl_binding_partition_t *bpart = jl_get_binding_partition(b, ct->world_age);
+ enum jl_partition_kind kind = jl_binding_kind(bpart);
+ if (!jl_bkind_is_some_implicit(kind) && kind != PARTITION_KIND_DECLARED) {
// Unlike regular constant declaration, we allow this as long as we eventually end up at a constant.
- pku = jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age);
- if (decode_restriction_kind(pku) == BINDING_KIND_CONST || decode_restriction_kind(pku) == BINDING_KIND_BACKDATED_CONST || decode_restriction_kind(pku) == BINDING_KIND_CONST_IMPORT) {
+ jl_walk_binding_inplace(&b, &bpart, ct->world_age);
+ if (jl_bkind_is_some_constant(jl_binding_kind(bpart))) {
// Already declared (e.g. on another thread) or imported.
- if (decode_restriction_value(pku) == (jl_value_t*)import)
+ if (bpart->restriction == (jl_value_t*)import)
return;
}
jl_errorf("importing %s into %s conflicts with an existing global",
jl_symbol_name(name), jl_symbol_name(m->name));
}
- jl_declare_constant_val2(b, m, name, (jl_value_t*)import, BINDING_KIND_CONST_IMPORT);
+ jl_declare_constant_val2(b, m, name, (jl_value_t*)import, PARTITION_KIND_CONST_IMPORT);
}
// in `import A.B: x, y, ...`, evaluate the `A.B` part if it exists
-static jl_module_t *eval_import_from(jl_module_t *m JL_PROPAGATES_ROOT, jl_expr_t *ex, const char *keyword)
+static jl_module_t *eval_import_from(jl_task_t *ct, jl_module_t *m JL_PROPAGATES_ROOT, jl_expr_t *ex, const char *keyword)
{
if (jl_expr_nargs(ex) == 1 && jl_is_expr(jl_exprarg(ex, 0))) {
jl_expr_t *fr = (jl_expr_t*)jl_exprarg(ex, 0);
@@ -690,7 +674,7 @@ static jl_module_t *eval_import_from(jl_module_t *m JL_PROPAGATES_ROOT, jl_expr_
jl_expr_t *path = (jl_expr_t*)jl_exprarg(fr, 0);
if (((jl_expr_t*)path)->head == jl_dot_sym) {
jl_sym_t *name = NULL;
- jl_module_t *from = eval_import_path(m, NULL, path->args, &name, keyword);
+ jl_module_t *from = eval_import_path(ct, m, NULL, path->args, &name, keyword);
if (name != NULL) {
from = (jl_module_t*)jl_eval_global_var(from, name);
if (!from || !jl_is_module(from))
@@ -739,82 +723,6 @@ static void jl_eval_errorf(jl_module_t *m, const char *filename, int lineno, con
JL_GC_POP();
}
-JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val3(
- jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *val,
- enum jl_partition_kind constant_kind, size_t new_world)
-{
- JL_GC_PUSH1(&val);
- if (!b) {
- b = jl_get_module_binding(mod, var, 1);
- }
- jl_binding_partition_t *bpart = jl_get_binding_partition(b, new_world);
- jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction);
- int did_warn = 0;
- while (1) {
- enum jl_partition_kind kind = decode_restriction_kind(pku);
- if (jl_bkind_is_some_constant(kind)) {
- if (!val) {
- break;
- }
- jl_value_t *old = decode_restriction_value(pku);
- JL_GC_PROMISE_ROOTED(old);
- if (jl_egal(val, old))
- break;
- if (!did_warn) {
- if (jl_typeof(val) != jl_typeof(old) || jl_is_type(val) || jl_is_module(val))
- jl_errorf("invalid redefinition of constant %s.%s",
- jl_symbol_name(mod->name),
- jl_symbol_name(var));
- else
- jl_safe_printf("WARNING: redefinition of constant %s.%s. This may fail, cause incorrect answers, or produce other errors.\n",
- jl_symbol_name(mod->name),
- jl_symbol_name(var));
- did_warn = 1;
- }
- if (new_world > bpart->min_world) {
- // TODO: Invoke invalidation logic here
- jl_atomic_store_relaxed(&bpart->max_world, new_world - 1);
- bpart = jl_get_binding_partition(b, new_world);
- pku = jl_atomic_load_relaxed(&bpart->restriction);
- }
- } else if (!jl_bkind_is_some_guard(decode_restriction_kind(pku))) {
- if (jl_bkind_is_some_import(decode_restriction_kind(pku))) {
- jl_errorf("cannot declare %s.%s constant; it was already declared as an import",
- jl_symbol_name(mod->name), jl_symbol_name(var));
- } else {
- jl_errorf("cannot declare %s.%s constant; it was already declared global",
- jl_symbol_name(mod->name), jl_symbol_name(var));
- }
- }
- if (!jl_atomic_cmpswap(&bpart->restriction, &pku, encode_restriction(val, constant_kind))) {
- continue;
- }
- jl_gc_wb(bpart, val);
- size_t prev_min_world = bpart->min_world;
- bpart->min_world = new_world;
- int need_backdate = 0;
- if (new_world && val) {
- if (prev_min_world == 0) {
- need_backdate = 1;
- } else if (kind == BINDING_KIND_DECLARED) {
- jl_binding_partition_t *prev_bpart = jl_get_binding_partition(b, prev_min_world-1);
- jl_ptr_kind_union_t prev_pku = jl_atomic_load_relaxed(&prev_bpart->restriction);
- if (prev_bpart->min_world == 0 && decode_restriction_kind(prev_pku) == BINDING_KIND_GUARD) {
- // Just keep it simple and use one backdated const entry for both previous guard partition
- // ranges.
- jl_atomic_store_relaxed(&prev_bpart->max_world, new_world-1);
- need_backdate = 1;
- }
- }
- }
- if (need_backdate) {
- jl_declare_constant_val3(b, mod, var, val, BINDING_KIND_BACKDATED_CONST, 0);
- }
- }
- JL_GC_POP();
- return bpart;
-}
-
JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val2(
jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *val,
enum jl_partition_kind constant_kind)
@@ -822,7 +730,7 @@ JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val2(
JL_LOCK(&world_counter_lock);
size_t new_world = jl_atomic_load_relaxed(&jl_world_counter) + 1;
jl_binding_partition_t *bpart = jl_declare_constant_val3(b, mod, var, val, constant_kind, new_world);
- if (bpart->min_world == new_world)
+ if (jl_atomic_load_relaxed(&bpart->min_world) == new_world)
jl_atomic_store_release(&jl_world_counter, new_world);
JL_UNLOCK(&world_counter_lock);
return bpart;
@@ -830,7 +738,7 @@ JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val2(
JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val(jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *val)
{
- return jl_declare_constant_val2(b, mod, var, val, val ? BINDING_KIND_CONST : BINDING_KIND_UNDEF_CONST);
+ return jl_declare_constant_val2(b, mod, var, val, val ? PARTITION_KIND_CONST : PARTITION_KIND_UNDEF_CONST);
}
JL_DLLEXPORT void jl_eval_const_decl(jl_module_t *m, jl_value_t *arg, jl_value_t *val)
@@ -912,8 +820,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val
}
else if (head == jl_using_sym) {
jl_sym_t *name = NULL;
- jl_module_t *from = eval_import_from(m, ex, "using");
- ct->world_age = jl_atomic_load_acquire(&jl_world_counter);
+ jl_module_t *from = eval_import_from(ct, m, ex, "using");
size_t i = 0;
if (from) {
i = 1;
@@ -923,10 +830,10 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val
jl_value_t *a = jl_exprarg(ex, i);
if (jl_is_expr(a) && ((jl_expr_t*)a)->head == jl_dot_sym) {
name = NULL;
- jl_module_t *import = eval_import_path(m, from, ((jl_expr_t*)a)->args, &name, "using");
+ jl_module_t *import = eval_import_path(ct, m, from, ((jl_expr_t*)a)->args, &name, "using");
if (from) {
// `using A: B` and `using A: B.c` syntax
- jl_module_use(m, import, name);
+ jl_module_use(ct, m, import, name);
}
else {
jl_module_t *u = import;
@@ -941,7 +848,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val
if (m == jl_main_module && name == NULL) {
// TODO: for now, `using A` in Main also creates an explicit binding for `A`
// This will possibly be extended to all modules.
- import_module(m, u, NULL);
+ import_module(ct, m, u, NULL);
}
}
continue;
@@ -952,12 +859,11 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val
if (jl_is_symbol(asname)) {
jl_expr_t *path = (jl_expr_t*)jl_exprarg(a, 0);
name = NULL;
- jl_module_t *import = eval_import_path(m, from, ((jl_expr_t*)path)->args, &name, "using");
- ct->world_age = jl_atomic_load_acquire(&jl_world_counter);
+ jl_module_t *import = eval_import_path(ct, m, from, ((jl_expr_t*)path)->args, &name, "using");
assert(name);
check_macro_rename(name, asname, "using");
// `using A: B as C` syntax
- jl_module_use_as(m, import, name, asname);
+ jl_module_use_as(ct, m, import, name, asname);
continue;
}
}
@@ -970,8 +876,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val
}
else if (head == jl_import_sym) {
jl_sym_t *name = NULL;
- jl_module_t *from = eval_import_from(m, ex, "import");
- ct->world_age = jl_atomic_load_acquire(&jl_world_counter);
+ jl_module_t *from = eval_import_from(ct, m, ex, "import");
size_t i = 0;
if (from) {
i = 1;
@@ -981,14 +886,14 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val
jl_value_t *a = jl_exprarg(ex, i);
if (jl_is_expr(a) && ((jl_expr_t*)a)->head == jl_dot_sym) {
name = NULL;
- jl_module_t *import = eval_import_path(m, from, ((jl_expr_t*)a)->args, &name, "import");
+ jl_module_t *import = eval_import_path(ct, m, from, ((jl_expr_t*)a)->args, &name, "import");
if (name == NULL) {
// `import A` syntax
- import_module(m, import, NULL);
+ import_module(ct, m, import, NULL);
}
else {
// `import A.B` or `import A: B` syntax
- jl_module_import(m, import, name);
+ jl_module_import(ct, m, import, name);
}
continue;
}
@@ -998,15 +903,15 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val
if (jl_is_symbol(asname)) {
jl_expr_t *path = (jl_expr_t*)jl_exprarg(a, 0);
name = NULL;
- jl_module_t *import = eval_import_path(m, from, ((jl_expr_t*)path)->args, &name, "import");
+ jl_module_t *import = eval_import_path(ct, m, from, ((jl_expr_t*)path)->args, &name, "import");
if (name == NULL) {
// `import A as B` syntax
- import_module(m, import, asname);
+ import_module(ct, m, import, asname);
}
else {
check_macro_rename(name, asname, "import");
// `import A.B as C` syntax
- jl_module_import_as(m, import, name, asname);
+ jl_module_import_as(ct, m, import, name, asname);
}
continue;
}
@@ -1020,14 +925,29 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val
}
else if (head == jl_export_sym || head == jl_public_sym) {
int exp = (head == jl_export_sym);
- for (size_t i = 0; i < jl_array_nrows(ex->args); i++) {
- jl_sym_t *name = (jl_sym_t*)jl_array_ptr_ref(ex->args, i);
- if (!jl_is_symbol(name))
- jl_eval_errorf(m, *toplevel_filename, *toplevel_lineno,
- exp ? "syntax: malformed \"export\" statement" :
- "syntax: malformed \"public\" statement");
- jl_module_public(m, name, exp);
+ volatile int any_new = 0;
+ JL_LOCK(&world_counter_lock);
+ size_t new_world = jl_atomic_load_acquire(&jl_world_counter)+1;
+ JL_TRY {
+ for (size_t i = 0; i < jl_array_nrows(ex->args); i++) {
+ jl_sym_t *name = (jl_sym_t*)jl_array_ptr_ref(ex->args, i);
+ if (!jl_is_symbol(name))
+ jl_eval_errorf(m, *toplevel_filename, *toplevel_lineno,
+ exp ? "syntax: malformed \"export\" statement" :
+ "syntax: malformed \"public\" statement");
+ if (jl_module_public_(m, name, exp, new_world))
+ any_new = 1;
+ }
+ }
+ JL_CATCH {
+ if (any_new)
+ jl_atomic_store_release(&jl_world_counter, new_world);
+ JL_UNLOCK(&world_counter_lock);
+ jl_rethrow();
}
+ if (any_new)
+ jl_atomic_store_release(&jl_world_counter, new_world);
+ JL_UNLOCK(&world_counter_lock);
JL_GC_POP();
return jl_nothing;
}
@@ -1035,7 +955,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val
size_t i, l = jl_array_nrows(ex->args);
for (i = 0; i < l; i++) {
jl_value_t *arg = jl_exprarg(ex, i);
- jl_declare_global(m, arg, NULL);
+ jl_declare_global(m, arg, NULL, 0);
}
JL_GC_POP();
return jl_nothing;
diff --git a/src/typemap.c b/src/typemap.c
index b8b699e101fe5..8c0e585601944 100644
--- a/src/typemap.c
+++ b/src/typemap.c
@@ -23,29 +23,29 @@ static int jl_is_any(jl_value_t *t1)
return t1 == (jl_value_t*)jl_any_type;
}
-static jl_value_t *jl_type_extract_name(jl_value_t *t1 JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT
+static jl_value_t *jl_type_extract_name(jl_value_t *t1 JL_PROPAGATES_ROOT, int invariant) JL_NOTSAFEPOINT
{
if (jl_is_unionall(t1))
t1 = jl_unwrap_unionall(t1);
if (jl_is_vararg(t1)) {
- return jl_type_extract_name(jl_unwrap_vararg(t1));
+ return jl_type_extract_name(jl_unwrap_vararg(t1), invariant);
}
else if (jl_is_typevar(t1)) {
- return jl_type_extract_name(((jl_tvar_t*)t1)->ub);
+ return jl_type_extract_name(((jl_tvar_t*)t1)->ub, invariant);
}
else if (t1 == jl_bottom_type || t1 == (jl_value_t*)jl_typeofbottom_type || t1 == (jl_value_t*)jl_typeofbottom_type->super) {
return (jl_value_t*)jl_typeofbottom_type->name; // put Union{} and typeof(Union{}) and Type{Union{}} together for convenience
}
else if (jl_is_datatype(t1)) {
jl_datatype_t *dt = (jl_datatype_t*)t1;
- if (!jl_is_kind(t1))
- return (jl_value_t*)dt->name;
- return NULL;
+ if (jl_is_kind(t1) && !invariant)
+ return (jl_value_t*)jl_type_typename;
+ return (jl_value_t*)dt->name;
}
else if (jl_is_uniontype(t1)) {
jl_uniontype_t *u1 = (jl_uniontype_t*)t1;
- jl_value_t *tn1 = jl_type_extract_name(u1->a);
- jl_value_t *tn2 = jl_type_extract_name(u1->b);
+ jl_value_t *tn1 = jl_type_extract_name(u1->a, invariant);
+ jl_value_t *tn2 = jl_type_extract_name(u1->b, invariant);
if (tn1 == tn2)
return tn1;
// TODO: if invariant is false, instead find the nearest common ancestor
@@ -71,7 +71,7 @@ static int jl_type_extract_name_precise(jl_value_t *t1, int invariant)
}
else if (jl_is_datatype(t1)) {
jl_datatype_t *dt = (jl_datatype_t*)t1;
- if ((invariant || !dt->name->abstract) && !jl_is_kind(t1))
+ if (invariant || !dt->name->abstract || dt->name == jl_type_typename)
return 1;
return 0;
}
@@ -81,8 +81,8 @@ static int jl_type_extract_name_precise(jl_value_t *t1, int invariant)
return 0;
if (!jl_type_extract_name_precise(u1->b, invariant))
return 0;
- jl_value_t *tn1 = jl_type_extract_name(u1->a);
- jl_value_t *tn2 = jl_type_extract_name(u1->b);
+ jl_value_t *tn1 = jl_type_extract_name(u1->a, invariant);
+ jl_value_t *tn2 = jl_type_extract_name(u1->b, invariant);
if (tn1 == tn2)
return 1;
return 0;
@@ -469,7 +469,7 @@ static int jl_typemap_intersection_memory_visitor(jl_genericmemory_t *a, jl_valu
tydt = (jl_datatype_t*)ttype;
}
else if (ttype) {
- ttype = jl_type_extract_name(ttype);
+ ttype = jl_type_extract_name(ttype, tparam & 1);
tydt = ttype ? (jl_datatype_t*)jl_unwrap_unionall(((jl_typename_t*)ttype)->wrapper) : NULL;
}
if (tydt == jl_any_type)
@@ -569,10 +569,8 @@ int has_covariant_var(jl_datatype_t *ttypes, jl_tvar_t *tv)
void typemap_slurp_search(jl_typemap_entry_t *ml, struct typemap_intersection_env *closure)
{
- // n.b. we could consider mt->max_args here too, so this optimization
- // usually works even if the user forgets the `slurp...` argument, but
- // there is discussion that parameter may be going away? (and it is
- // already not accurately up-to-date for all tables currently anyways)
+ // TODO: we should consider nparams(closure->type) here too, so this optimization
+ // usually works even if the user forgets the `slurp...` argument
if (closure->search_slurp && ml->va) {
jl_value_t *sig = jl_unwrap_unionall((jl_value_t*)ml->sig);
size_t nargs = jl_nparams(sig);
@@ -641,7 +639,7 @@ int jl_typemap_intersection_visitor(jl_typemap_t *map, int offs,
if (maybe_type && !maybe_kind) {
typetype = jl_unwrap_unionall(ty);
typetype = jl_is_type_type(typetype) ? jl_tparam0(typetype) : NULL;
- name = typetype ? jl_type_extract_name(typetype) : NULL;
+ name = typetype ? jl_type_extract_name(typetype, 1) : NULL;
if (!typetype)
exclude_typeofbottom = !jl_subtype((jl_value_t*)jl_typeofbottom_type, ty);
else if (jl_is_typevar(typetype))
@@ -717,7 +715,7 @@ int jl_typemap_intersection_visitor(jl_typemap_t *map, int offs,
}
}
else {
- jl_value_t *name = jl_type_extract_name(ty);
+ jl_value_t *name = jl_type_extract_name(ty, 0);
if (name && jl_type_extract_name_precise(ty, 0)) {
// direct lookup of leaf types
jl_value_t *ml = mtcache_hash_lookup(cachearg1, name);
@@ -782,7 +780,7 @@ int jl_typemap_intersection_visitor(jl_typemap_t *map, int offs,
}
jl_genericmemory_t *name1 = jl_atomic_load_relaxed(&cache->name1);
if (name1 != (jl_genericmemory_t*)jl_an_empty_memory_any) {
- jl_value_t *name = jl_type_extract_name(ty);
+ jl_value_t *name = jl_type_extract_name(ty, 0);
if (name && jl_type_extract_name_precise(ty, 0)) {
jl_datatype_t *super = (jl_datatype_t*)jl_unwrap_unionall(((jl_typename_t*)name)->wrapper);
// direct lookup of concrete types
@@ -1003,7 +1001,7 @@ jl_typemap_entry_t *jl_typemap_assoc_by_type(
// now look at the optimized TypeName caches
jl_genericmemory_t *tname = jl_atomic_load_relaxed(&cache->tname);
if (tname != (jl_genericmemory_t*)jl_an_empty_memory_any) {
- jl_value_t *a0 = ty && jl_is_type_type(ty) ? jl_type_extract_name(jl_tparam0(ty)) : NULL;
+ jl_value_t *a0 = ty && jl_is_type_type(ty) ? jl_type_extract_name(jl_tparam0(ty), 1) : NULL;
if (a0) { // TODO: if we start analyzing Union types in jl_type_extract_name, then a0 might be over-approximated here, leading us to miss possible subtypes
jl_datatype_t *super = (jl_datatype_t*)jl_unwrap_unionall(((jl_typename_t*)a0)->wrapper);
while (1) {
@@ -1042,7 +1040,7 @@ jl_typemap_entry_t *jl_typemap_assoc_by_type(
jl_genericmemory_t *name1 = jl_atomic_load_relaxed(&cache->name1);
if (name1 != (jl_genericmemory_t*)jl_an_empty_memory_any) {
if (ty) {
- jl_value_t *a0 = jl_type_extract_name(ty);
+ jl_value_t *a0 = jl_type_extract_name(ty, 0);
if (a0) { // TODO: if we start analyzing Union types in jl_type_extract_name, then a0 might be over-approximated here, leading us to miss possible subtypes
jl_datatype_t *super = (jl_datatype_t*)jl_unwrap_unionall(((jl_typename_t*)a0)->wrapper);
while (1) {
@@ -1200,7 +1198,7 @@ jl_typemap_entry_t *jl_typemap_level_assoc_exact(jl_typemap_level_t *cache, jl_v
}
jl_genericmemory_t *tname = jl_atomic_load_relaxed(&cache->tname);
if (jl_is_kind(ty) && tname != (jl_genericmemory_t*)jl_an_empty_memory_any) {
- jl_value_t *name = jl_type_extract_name(a1);
+ jl_value_t *name = jl_type_extract_name(a1, 1);
if (name) {
if (ty != (jl_value_t*)jl_datatype_type)
a1 = jl_unwrap_unionall(((jl_typename_t*)name)->wrapper);
@@ -1447,12 +1445,12 @@ static void jl_typemap_level_insert_(
jl_value_t *a0;
t1 = jl_unwrap_unionall(t1);
if (jl_is_type_type(t1)) {
- a0 = jl_type_extract_name(jl_tparam0(t1));
+ a0 = jl_type_extract_name(jl_tparam0(t1), 1);
jl_datatype_t *super = a0 ? (jl_datatype_t*)jl_unwrap_unionall(((jl_typename_t*)a0)->wrapper) : jl_any_type;
jl_typemap_memory_insert_(map, &cache->tname, (jl_value_t*)super->name, newrec, (jl_value_t*)cache, 1, offs, NULL);
return;
}
- a0 = jl_type_extract_name(t1);
+ a0 = jl_type_extract_name(t1, 0);
if (a0 && a0 != (jl_value_t*)jl_any_type->name) {
jl_typemap_memory_insert_(map, &cache->name1, a0, newrec, (jl_value_t*)cache, 0, offs, NULL);
return;
diff --git a/src/utils.scm b/src/utils.scm
index 79e3a280b9886..80fc44615a49a 100644
--- a/src/utils.scm
+++ b/src/utils.scm
@@ -119,3 +119,16 @@
(cons (car lst) (filter (lambda (x) (not (pred x))) (cdr lst))))
(else
(cons (car lst) (keep-first pred (cdr lst))))))
+
+(define (take lst n)
+ (let loop ((lst lst) (n n) (out '()))
+ (if (= n 0) (reverse out)
+ (loop (cdr lst) (- n 1) (cons (car lst) out)))))
+
+(define (drop lst n)
+ (if (= n 0) lst
+ (drop (cdr lst) (- n 1))))
+
+;; functional update at position i
+(define (list-set lst i val)
+ (append (take lst i) (list val) (drop lst (+ i 1))))
diff --git a/stdlib/Dates/src/Dates.jl b/stdlib/Dates/src/Dates.jl
index a4600a5f82043..0e6d0d0ef6986 100644
--- a/stdlib/Dates/src/Dates.jl
+++ b/stdlib/Dates/src/Dates.jl
@@ -81,4 +81,6 @@ export Period, DatePeriod, TimePeriod,
# io.jl
ISODateTimeFormat, ISODateFormat, ISOTimeFormat, DateFormat, RFC1123Format, @dateformat_str
+public format
+
end # module
diff --git a/stdlib/Dates/src/io.jl b/stdlib/Dates/src/io.jl
index aa7019566093c..88c32bf064bf0 100644
--- a/stdlib/Dates/src/io.jl
+++ b/stdlib/Dates/src/io.jl
@@ -478,7 +478,7 @@ but creates the DateFormat object once during macro expansion.
See [`DateFormat`](@ref) for details about format specifiers.
"""
-macro dateformat_str(str)
+macro dateformat_str(str::String)
DateFormat(str)
end
diff --git a/stdlib/FileWatching/src/FileWatching.jl b/stdlib/FileWatching/src/FileWatching.jl
index 7c743ce634193..ebfdd9c8fea6b 100644
--- a/stdlib/FileWatching/src/FileWatching.jl
+++ b/stdlib/FileWatching/src/FileWatching.jl
@@ -488,12 +488,11 @@ end
function getproperty(fdw::FDWatcher, s::Symbol)
# support deprecated field names
- s === :readable && return fdw.mask.readable
- s === :writable && return fdw.mask.writable
+ s === :readable && return getfield(fdw, :mask).readable
+ s === :writable && return getfield(fdw, :mask).writable
return getfield(fdw, s)
end
-
close(t::_FDWatcher, mask::FDEvent) = close(t, mask.readable, mask.writable)
function close(t::_FDWatcher, readable::Bool, writable::Bool)
iolock_begin()
diff --git a/stdlib/InteractiveUtils/src/InteractiveUtils.jl b/stdlib/InteractiveUtils/src/InteractiveUtils.jl
index 4a320282610cd..6b75a228b2761 100644
--- a/stdlib/InteractiveUtils/src/InteractiveUtils.jl
+++ b/stdlib/InteractiveUtils/src/InteractiveUtils.jl
@@ -17,7 +17,7 @@ export apropos, edit, less, code_warntype, code_llvm, code_native, methodswith,
import Base.Docs.apropos
using Base: unwrap_unionall, rewrap_unionall, isdeprecated, Bottom, summarysize,
- signature_type, format_bytes, isbindingresolved
+ signature_type, format_bytes
using Base.Libc
using Markdown
@@ -264,7 +264,7 @@ function _subtypes_in!(mods::Array, x::Type)
m = pop!(mods)
xt = xt::DataType
for s in names(m, all = true)
- if isbindingresolved(m, s) && !isdeprecated(m, s) && isdefined(m, s)
+ if !isdeprecated(m, s) && isdefined(m, s)
t = getfield(m, s)
dt = isa(t, UnionAll) ? unwrap_unionall(t) : t
if isa(dt, DataType)
diff --git a/stdlib/InteractiveUtils/test/runtests.jl b/stdlib/InteractiveUtils/test/runtests.jl
index 0de67fea69dea..739ed5fac9ef2 100644
--- a/stdlib/InteractiveUtils/test/runtests.jl
+++ b/stdlib/InteractiveUtils/test/runtests.jl
@@ -658,6 +658,10 @@ file, ln = functionloc(versioninfo, Tuple{})
@test isfile(pathof(InteractiveUtils))
@test isdir(pkgdir(InteractiveUtils))
+# compiler stdlib path updating
+file, ln = functionloc(Core.Compiler.tmeet, Tuple{Int, Float64})
+@test isfile(file)
+
@testset "buildbot path updating" begin
file, ln = functionloc(versioninfo, Tuple{})
@test isfile(file)
diff --git a/stdlib/JuliaSyntaxHighlighting.version b/stdlib/JuliaSyntaxHighlighting.version
index 14eb1cedf49a4..1c9bfb131dc0f 100644
--- a/stdlib/JuliaSyntaxHighlighting.version
+++ b/stdlib/JuliaSyntaxHighlighting.version
@@ -1,4 +1,4 @@
JULIASYNTAXHIGHLIGHTING_BRANCH = main
-JULIASYNTAXHIGHLIGHTING_SHA1 = 2680c8bde1aa274f25d7a434c645f16b3a1ee731
+JULIASYNTAXHIGHLIGHTING_SHA1 = b7a1c636d3e9690bfbbfe917bb20f6cb112a3e6f
JULIASYNTAXHIGHLIGHTING_GIT_URL := https://github.com/julialang/JuliaSyntaxHighlighting.jl.git
JULIASYNTAXHIGHLIGHTING_TAR_URL = https://api.github.com/repos/julialang/JuliaSyntaxHighlighting.jl/tarball/$1
diff --git a/stdlib/LibGit2/src/LibGit2.jl b/stdlib/LibGit2/src/LibGit2.jl
index 04435dd577c19..4eed62331bdbc 100644
--- a/stdlib/LibGit2/src/LibGit2.jl
+++ b/stdlib/LibGit2/src/LibGit2.jl
@@ -1042,24 +1042,20 @@ function set_ssl_cert_locations(cert_loc)
else # files, /dev/null, non-existent paths, etc.
cert_file = cert_loc
end
- ret = @ccall libgit2.git_libgit2_opts(
+ ret = @ccall libgit2.git_libgit2_opts(
Consts.SET_SSL_CERT_LOCATIONS::Cint;
cert_file::Cstring,
cert_dir::Cstring)::Cint
ret >= 0 && return ret
+ # On macOS and Windows LibGit2_jll is built without a TLS backend that supports
+ # certificate locations; don't throw on this expected error so we allow certificate
+ # location environment variables to be set for other purposes.
+ # We still try doing so to support other LibGit2 builds.
err = Error.GitError(ret)
err.class == Error.SSL &&
err.msg == "TLS backend doesn't support certificate locations" ||
throw(err)
- var = nothing
- for v in NetworkOptions.CA_ROOTS_VARS
- haskey(ENV, v) && (var = v)
- end
- @assert var !== nothing # otherwise we shouldn't be here
- msg = """
- Your Julia is built with a SSL/TLS engine that libgit2 doesn't know how to configure to use a file or directory of certificate authority roots, but your environment specifies one via the $var variable. If you believe your system's root certificates are safe to use, you can `export JULIA_SSL_CA_ROOTS_PATH=""` in your environment to use those instead.
- """
- throw(Error.GitError(err.class, err.code, chomp(msg)))
+ return ret
end
"""
diff --git a/stdlib/LibGit2/test/bad_ca_roots.jl b/stdlib/LibGit2/test/bad_ca_roots.jl
index 4882065167bdb..4caed4ed90beb 100644
--- a/stdlib/LibGit2/test/bad_ca_roots.jl
+++ b/stdlib/LibGit2/test/bad_ca_roots.jl
@@ -12,20 +12,24 @@ const CAN_SET_CA_ROOTS_PATH = !Sys.isapple() && !Sys.iswindows()
# Given this is a sub-processed test file, not using @testsets avoids
# leaking the report print into the Base test runner report
begin # empty CA roots file
- # these fail for different reasons on different platforms:
- # - on Apple & Windows you cannot set the CA roots path location
- # - on Linux & FreeBSD you you can but these are invalid files
+ # different behavior on different platforms:
+ # - on Apple & Windows you cannot set the CA roots path location; don't error
+ # - on Linux & FreeBSD you can but these are invalid files
+
ENV["JULIA_SSL_CA_ROOTS_PATH"] = "/dev/null"
- @test_throws LibGit2.GitError LibGit2.ensure_initialized()
+ if CAN_SET_CA_ROOTS_PATH
+ @test_throws LibGit2.GitError LibGit2.ensure_initialized()
+ else
+ @test LibGit2.ensure_initialized() === nothing
+ end
+
ENV["JULIA_SSL_CA_ROOTS_PATH"] = tempname()
- @test_throws LibGit2.GitError LibGit2.ensure_initialized()
- # test that it still fails if called a second time
- @test_throws LibGit2.GitError LibGit2.ensure_initialized()
- if !CAN_SET_CA_ROOTS_PATH
- # test that this doesn't work on macOS & Windows
- ENV["JULIA_SSL_CA_ROOTS_PATH"] = NetworkOptions.bundled_ca_roots()
+ if CAN_SET_CA_ROOTS_PATH
+ @test_throws LibGit2.GitError LibGit2.ensure_initialized()
+ # test that it still fails if called a second time
@test_throws LibGit2.GitError LibGit2.ensure_initialized()
- delete!(ENV, "JULIA_SSL_CA_ROOTS_PATH")
+ else
+ @test LibGit2.ensure_initialized() === nothing
@test LibGit2.ensure_initialized() === nothing
end
end
diff --git a/stdlib/LinearAlgebra.version b/stdlib/LinearAlgebra.version
index 96c00ca746651..0aa71a8804660 100644
--- a/stdlib/LinearAlgebra.version
+++ b/stdlib/LinearAlgebra.version
@@ -1,4 +1,4 @@
-LINEARALGEBRA_BRANCH = master
-LINEARALGEBRA_SHA1 = e7da19f2764ba36bd0a9eb8ec67dddce19d87114
+LINEARALGEBRA_BRANCH = release-1.12
+LINEARALGEBRA_SHA1 = 4e7c3f40316a956119ac419a97c4b8aad7a17e6c
LINEARALGEBRA_GIT_URL := https://github.com/JuliaLang/LinearAlgebra.jl.git
LINEARALGEBRA_TAR_URL = https://api.github.com/repos/JuliaLang/LinearAlgebra.jl/tarball/$1
diff --git a/stdlib/Logging/test/runtests.jl b/stdlib/Logging/test/runtests.jl
index 2fedbde557078..3e92b7d9e2697 100644
--- a/stdlib/Logging/test/runtests.jl
+++ b/stdlib/Logging/test/runtests.jl
@@ -306,4 +306,47 @@ end
@test isempty(undoc)
end
+@testset "Logging when multithreaded" begin
+ n = 10000
+ cmd = `$(Base.julia_cmd()) -t4 --color=no $(joinpath(@__DIR__, "threads_exec.jl")) $n`
+ fname = tempname()
+ @testset "Thread safety" begin
+ f = open(fname, "w")
+ @test success(run(pipeline(cmd, stderr=f)))
+ close(f)
+ end
+
+ @testset "No tearing in log printing" begin
+ # Check for print tearing by verifying that each log entry starts and ends correctly
+ f = open(fname, "r")
+ entry_start = r"┌ (Info|Warning|Error): iteration"
+ entry_end = r"└ "
+
+ open_entries = 0
+ total_entries = 0
+ for line in eachline(fname)
+ starts = count(entry_start, line)
+ starts > 1 && error("Interleaved logs: Multiple log entries started on one line")
+ if starts == 1
+ startswith(line, entry_start) || error("Interleaved logs: Log entry started in the middle of a line")
+ open_entries += 1
+ total_entries += 1
+ end
+
+ ends = count(entry_end, line)
+ starts == 1 && ends == 1 && error("Interleaved logs: Log entry started and another ended on one line")
+ ends > 1 && error("Interleaved logs: Multiple log entries ended on one line")
+ if ends == 1
+ startswith(line, entry_end) || error("Interleaved logs: Log entry ended in the middle of a line")
+ open_entries -= 1
+ end
+ # Ensure no mismatched log entries
+ open_entries >= 0 || error("Interleaved logs")
+ end
+
+ @test open_entries == 0 # Ensure all entries closed properly
+ @test total_entries == n * 3 # Ensure all logs were printed (3 because @debug is hidden)
+ end
+end
+
end
diff --git a/stdlib/Logging/test/threads_exec.jl b/stdlib/Logging/test/threads_exec.jl
new file mode 100644
index 0000000000000..497a22b1c7b22
--- /dev/null
+++ b/stdlib/Logging/test/threads_exec.jl
@@ -0,0 +1,13 @@
+using Logging
+
+function test_threads_exec(n)
+ Threads.@threads for i in 1:n
+ @debug "iteration" maxlog=1 _id=Symbol("$(i)_debug") i Threads.threadid()
+ @info "iteration" maxlog=1 _id=Symbol("$(i)_info") i Threads.threadid()
+ @warn "iteration" maxlog=1 _id=Symbol("$(i)_warn") i Threads.threadid()
+ @error "iteration" maxlog=1 _id=Symbol("$(i)_error") i Threads.threadid()
+ end
+end
+
+n = parse(Int, ARGS[1])
+test_threads_exec(n)
diff --git a/stdlib/MPFR_jll/Project.toml b/stdlib/MPFR_jll/Project.toml
index 50de38f169ff0..9958383f4e65b 100644
--- a/stdlib/MPFR_jll/Project.toml
+++ b/stdlib/MPFR_jll/Project.toml
@@ -1,6 +1,6 @@
name = "MPFR_jll"
uuid = "3a97d323-0669-5f0c-9066-3539efd106a3"
-version = "4.2.1+2"
+version = "4.2.2+0"
[deps]
GMP_jll = "781609d7-10c4-51f6-84f2-b8444358ff6d"
diff --git a/stdlib/MPFR_jll/test/runtests.jl b/stdlib/MPFR_jll/test/runtests.jl
index fc931b462fa9c..1dbbbb298e737 100644
--- a/stdlib/MPFR_jll/test/runtests.jl
+++ b/stdlib/MPFR_jll/test/runtests.jl
@@ -4,5 +4,5 @@ using Test, Libdl, MPFR_jll
@testset "MPFR_jll" begin
vn = VersionNumber(unsafe_string(ccall((:mpfr_get_version,libmpfr), Cstring, ())))
- @test vn == v"4.2.1"
+ @test vn == v"4.2.2"
end
diff --git a/stdlib/Manifest.toml b/stdlib/Manifest.toml
index b149532d7b203..0dcd58d217d0b 100644
--- a/stdlib/Manifest.toml
+++ b/stdlib/Manifest.toml
@@ -136,7 +136,7 @@ version = "1.11.0"
[[deps.MPFR_jll]]
deps = ["Artifacts", "GMP_jll", "Libdl"]
uuid = "3a97d323-0669-5f0c-9066-3539efd106a3"
-version = "4.2.1+1"
+version = "4.2.2+0"
[[deps.Markdown]]
deps = ["Base64", "JuliaSyntaxHighlighting", "StyledStrings"]
@@ -168,7 +168,7 @@ version = "0.8.5+0"
[[deps.OpenSSL_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
-version = "3.0.15+1"
+version = "3.5.0+0"
[[deps.PCRE2_jll]]
deps = ["Artifacts", "Libdl"]
diff --git a/stdlib/Markdown/src/Markdown.jl b/stdlib/Markdown/src/Markdown.jl
index 8d79cc93d6171..723eb6ca68482 100644
--- a/stdlib/Markdown/src/Markdown.jl
+++ b/stdlib/Markdown/src/Markdown.jl
@@ -138,12 +138,16 @@ catdoc(md::MD...) = MD(md...)
if Base.generating_output()
# workload to reduce latency
- md"""
+ show(devnull, MIME("text/plain"), md"""
# H1
## H2
### H3
+ #### H4
+ ##### H5
+ ###### H6
**bold text**
*italicized text*
+ ***bold and italicized text***
> blockquote
1. First item
2. Second item
@@ -151,10 +155,18 @@ if Base.generating_output()
- First item
- Second item
- Third item
+ - Indented item
`code`
Horizontal Rule
---
- """
+ **[Duck Duck Go](https://duckduckgo.com)**
+
+
+ 
+
+ H~2~O
+ X^2^
+ """)
end
end
diff --git a/stdlib/MozillaCACerts_jll/Project.toml b/stdlib/MozillaCACerts_jll/Project.toml
index 2f9bf67e22a74..a951435168922 100644
--- a/stdlib/MozillaCACerts_jll/Project.toml
+++ b/stdlib/MozillaCACerts_jll/Project.toml
@@ -1,7 +1,7 @@
name = "MozillaCACerts_jll"
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
# Keep in sync with `deps/libgit2.version`.
-version = "2024.12.31"
+version = "2025.02.25"
[extras]
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/stdlib/OpenSSL_jll/Project.toml b/stdlib/OpenSSL_jll/Project.toml
index 0773311e11043..28ecf86381213 100644
--- a/stdlib/OpenSSL_jll/Project.toml
+++ b/stdlib/OpenSSL_jll/Project.toml
@@ -1,6 +1,6 @@
name = "OpenSSL_jll"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
-version = "3.0.15+2"
+version = "3.5.0+0"
[deps]
Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
diff --git a/stdlib/OpenSSL_jll/test/runtests.jl b/stdlib/OpenSSL_jll/test/runtests.jl
index 35431d04bfcac..e5ae938b68311 100644
--- a/stdlib/OpenSSL_jll/test/runtests.jl
+++ b/stdlib/OpenSSL_jll/test/runtests.jl
@@ -6,5 +6,5 @@ using Test, Libdl, OpenSSL_jll
major = ccall((:OPENSSL_version_major, libcrypto), Cuint, ())
minor = ccall((:OPENSSL_version_minor, libcrypto), Cuint, ())
patch = ccall((:OPENSSL_version_patch, libcrypto), Cuint, ())
- @test VersionNumber(major, minor, patch) == v"3.0.15"
+ @test VersionNumber(major, minor, patch) == v"3.5.0"
end
diff --git a/stdlib/Profile/src/Allocs.jl b/stdlib/Profile/src/Allocs.jl
index 9d0b18cb468ca..93c9d3392626f 100644
--- a/stdlib/Profile/src/Allocs.jl
+++ b/stdlib/Profile/src/Allocs.jl
@@ -79,11 +79,11 @@ end
function _prof_expr(expr, opts)
quote
$start(; $(esc(opts)))
- try
+ Base.@__tryfinally(
$(esc(expr))
- finally
+ ,
$stop()
- end
+ )
end
end
diff --git a/stdlib/Profile/src/Profile.jl b/stdlib/Profile/src/Profile.jl
index f59b49d8a4a36..27f32fc453d55 100644
--- a/stdlib/Profile/src/Profile.jl
+++ b/stdlib/Profile/src/Profile.jl
@@ -56,12 +56,12 @@ appended to an internal buffer of backtraces.
"""
macro profile(ex)
return quote
- try
- start_timer()
+ start_timer()
+ Base.@__tryfinally(
$(esc(ex))
- finally
+ ,
stop_timer()
- end
+ )
end
end
@@ -78,12 +78,12 @@ it can be used to diagnose performance issues such as lock contention, IO bottle
"""
macro profile_walltime(ex)
return quote
- try
- start_timer(true)
+ start_timer(true);
+ Base.@__tryfinally(
$(esc(ex))
- finally
+ ,
stop_timer()
- end
+ )
end
end
@@ -980,13 +980,14 @@ mutable struct StackFrameTree{T} # where T <: Union{UInt64, StackFrame}
flat_count::Int # number of times this frame was in the flattened representation (unlike count, this'll sum to 100% of parent)
max_recur::Int # maximum number of times this frame was the *top* of the recursion in the stack
count_recur::Int # sum of the number of times this frame was the *top* of the recursion in a stack (divide by count to get an average)
+ sleeping::Bool # whether this frame was in a sleeping state
down::Dict{T, StackFrameTree{T}}
# construction workers:
recur::Int
builder_key::Vector{UInt64}
builder_value::Vector{StackFrameTree{T}}
up::StackFrameTree{T}
- StackFrameTree{T}() where {T} = new(UNKNOWN, 0, 0, 0, 0, 0, Dict{T, StackFrameTree{T}}(), 0, UInt64[], StackFrameTree{T}[])
+ StackFrameTree{T}() where {T} = new(UNKNOWN, 0, 0, 0, 0, 0, true, Dict{T, StackFrameTree{T}}(), 0, UInt64[], StackFrameTree{T}[])
end
@@ -1027,6 +1028,10 @@ function tree_format(frames::Vector{<:StackFrameTree}, level::Int, cols::Int, ma
base = string(base, "+", nextra, " ")
end
strcount = rpad(string(frame.count), ndigcounts, " ")
+ if frame.sleeping
+ stroverhead = styled"{gray:$(stroverhead)}"
+ strcount = styled"{gray:$(strcount)}"
+ end
if li != UNKNOWN
if li.line == li.pointer
strs[i] = string(stroverhead, "╎", base, strcount, " ",
@@ -1039,6 +1044,7 @@ function tree_format(frames::Vector{<:StackFrameTree}, level::Int, cols::Int, ma
else
fname = string(li.func)
end
+ frame.sleeping && (fname = styled"{gray:$(fname)}")
path, pkgname, filename = short_path(li.file, filenamemap)
if showpointer
fname = string(
@@ -1082,15 +1088,15 @@ function tree!(root::StackFrameTree{T}, all::Vector{UInt64}, lidict::Union{LineI
skip = false
nsleeping = 0
is_task_profile = false
+ is_sleeping = true
for i in startframe:-1:1
(startframe - 1) >= i >= (startframe - (nmeta + 1)) && continue # skip metadata (it's read ahead below) and extra block end NULL IP
ip = all[i]
if is_block_end(all, i)
# read metadata
thread_sleeping_state = all[i - META_OFFSET_SLEEPSTATE] - 1 # subtract 1 as state is incremented to avoid being equal to 0
- if thread_sleeping_state == 2
- is_task_profile = true
- end
+ is_sleeping = thread_sleeping_state == 1
+ is_task_profile = thread_sleeping_state == 2
# cpu_cycle_clock = all[i - META_OFFSET_CPUCYCLECLOCK]
taskid = all[i - META_OFFSET_TASKID]
threadid = all[i - META_OFFSET_THREADID]
@@ -1145,6 +1151,7 @@ function tree!(root::StackFrameTree{T}, all::Vector{UInt64}, lidict::Union{LineI
parent = build[j]
parent.recur += 1
parent.count_recur += 1
+ parent.sleeping &= is_sleeping
found = true
break
end
@@ -1164,6 +1171,7 @@ function tree!(root::StackFrameTree{T}, all::Vector{UInt64}, lidict::Union{LineI
while this !== parent && (recur === :off || this.recur == 0)
this.count += 1
this.recur = 1
+ this.sleeping &= is_sleeping
this = this.up
end
end
@@ -1185,6 +1193,7 @@ function tree!(root::StackFrameTree{T}, all::Vector{UInt64}, lidict::Union{LineI
this.up = parent
this.count += 1
this.recur = 1
+ this.sleeping &= is_sleeping
end
parent = this
end
diff --git a/stdlib/Profile/test/allocs.jl b/stdlib/Profile/test/allocs.jl
index 5607783c782f9..8f6539e0baed6 100644
--- a/stdlib/Profile/test/allocs.jl
+++ b/stdlib/Profile/test/allocs.jl
@@ -8,6 +8,11 @@ let iobuf = IOBuffer()
end
end
+# Issue #57103: This test does not work with MMTk because of fastpath
+# allocation which never calls the allocation profiler.
+# TODO: We should port these observability tools (e.g. allocation
+# profiler and heap snapshot) to MMTk
+@static if Base.USING_STOCK_GC
@testset "alloc profiler doesn't segfault" begin
res = Allocs.@profile sample_rate=1.0 begin
# test the allocations during compilation
@@ -73,14 +78,8 @@ end
@test length(first_alloc.stacktrace) > 0
@test length(string(first_alloc.type)) > 0
- # Issue #57103: This test does not work with MMTk because of fastpath
- # allocation which never calls the allocation profiler.
- # TODO: We should port these observability tools (e.g. allocation
- # profiler and heap snapshot) to MMTk
- @static if Base.USING_STOCK_GC
- @testset for type in (Task, Vector{Float64},)
- @test length(filter(a->a.type <: type, profile.allocs)) >= NUM_TASKS
- end
+ @testset for type in (Task, Vector{Float64},)
+ @test length(filter(a->a.type <: type, profile.allocs)) >= NUM_TASKS
end
# TODO: it would be nice to assert that these tasks
@@ -149,8 +148,6 @@ end
@test length([a for a in prof.allocs if a.type == String]) >= 1
end
-# FIXME: Issue #57103 disabling test for MMTk.
-@static if Base.USING_STOCK_GC
@testset "alloc profiler catches allocs from codegen" begin
@eval begin
struct MyType x::Int; y::Int end
@@ -170,7 +167,6 @@ end
@test length(prof.allocs) >= 1
@test length([a for a in prof.allocs if a.type == MyType]) >= 1
end
-end
@testset "alloc profiler catches allocs from buffer resize" begin
f(a) = for _ in 1:100; push!(a, 1); end
@@ -187,3 +183,4 @@ end
@test length([a for a in prof.allocs if a.type === Allocs.BufferType]) == 1
@test length([a for a in prof.allocs if a.type === Memory{Int}]) >= 2
end
+end
diff --git a/stdlib/Profile/test/runtests.jl b/stdlib/Profile/test/runtests.jl
index e7877b949a17e..b487d8963f156 100644
--- a/stdlib/Profile/test/runtests.jl
+++ b/stdlib/Profile/test/runtests.jl
@@ -155,6 +155,20 @@ end
@test z == 10
end
+@testset "@profile no scope" begin
+ @profile no_scope_57858_1 = 1
+ @test @isdefined no_scope_57858_1
+ Profile.clear()
+
+ @profile_walltime no_scope_57858_1 = 1
+ @test @isdefined no_scope_57858_1
+ Profile.clear()
+
+ Profile.Allocs.@profile no_scope_57858_2 = 1
+ @test @isdefined no_scope_57858_2
+ Profile.Allocs.clear()
+end
+
@testset "setting sample count and delay in init" begin
n_, delay_ = Profile.init()
n_original = n_
diff --git a/stdlib/REPL/docs/src/index.md b/stdlib/REPL/docs/src/index.md
index eabd7e729280e..ddd0a0953fcfc 100644
--- a/stdlib/REPL/docs/src/index.md
+++ b/stdlib/REPL/docs/src/index.md
@@ -343,7 +343,15 @@ mapfoldl mapfoldr
When a single complete tab-complete result is available at the end of an input line and 2 or more characters
have been typed, a hint of the completion will show in a lighter color.
-This can be disabled via `Base.active_repl.options.hint_tab_completes = false`.
+This can be disabled via `Base.active_repl.options.hint_tab_completes = false` or by adding
+```
+atreplinit() do repl
+ if VERSION >= v"1.11.0-0"
+ repl.options.hint_tab_completes = false
+ end
+end
+```
+to your `~/.julia/config/startup.jl`.
!!! compat "Julia 1.11"
Tab-complete hinting was added in Julia 1.11
diff --git a/stdlib/REPL/src/LineEdit.jl b/stdlib/REPL/src/LineEdit.jl
index 1075aa648b926..53e497a6548ee 100644
--- a/stdlib/REPL/src/LineEdit.jl
+++ b/stdlib/REPL/src/LineEdit.jl
@@ -391,16 +391,21 @@ function complete_line(s::MIState)
end
end
+# Old complete_line return type: Vector{String}, String, Bool
+# New complete_line return type: NamedCompletion{String}, String, Bool
+# OR NamedCompletion{String}, Region, Bool
+#
# due to close coupling of the Pkg ReplExt `complete_line` can still return a vector of strings,
# so we convert those in this helper
-function complete_line_named(args...; kwargs...)::Tuple{Vector{NamedCompletion},String,Bool}
- result = complete_line(args...; kwargs...)::Union{Tuple{Vector{NamedCompletion},String,Bool},Tuple{Vector{String},String,Bool}}
- if result isa Tuple{Vector{NamedCompletion},String,Bool}
- return result
- else
- completions, partial, should_complete = result
- return map(NamedCompletion, completions), partial, should_complete
- end
+function complete_line_named(c, s, args...; kwargs...)::Tuple{Vector{NamedCompletion},Region,Bool}
+ r1, r2, should_complete = complete_line(c, s, args...; kwargs...)::Union{
+ Tuple{Vector{String}, String, Bool},
+ Tuple{Vector{NamedCompletion}, String, Bool},
+ Tuple{Vector{NamedCompletion}, Region, Bool},
+ }
+ completions = (r1 isa Vector{String} ? map(NamedCompletion, r1) : r1)
+ r = (r2 isa String ? (position(s)-sizeof(r2) => position(s)) : r2)
+ completions, r, should_complete
end
# checks for a hint and shows it if appropriate.
@@ -426,14 +431,14 @@ function check_show_hint(s::MIState)
return
end
t_completion = Threads.@spawn :default begin
- named_completions, partial, should_complete = nothing, nothing, nothing
+ named_completions, reg, should_complete = nothing, nothing, nothing
# only allow one task to generate hints at a time and check around lock
# if the user has pressed a key since the hint was requested, to skip old completions
next_key_pressed() && return
@lock s.hint_generation_lock begin
next_key_pressed() && return
- named_completions, partial, should_complete = try
+ named_completions, reg, should_complete = try
complete_line_named(st.p.complete, st, s.active_module; hint = true)
catch
lock_clear_hint()
@@ -448,21 +453,19 @@ function check_show_hint(s::MIState)
return
end
# Don't complete for single chars, given e.g. `x` completes to `xor`
- if length(partial) > 1 && should_complete
+ if reg.second - reg.first > 1 && should_complete
singlecompletion = length(completions) == 1
p = singlecompletion ? completions[1] : common_prefix(completions)
if singlecompletion || p in completions # i.e. complete `@time` even though `@time_imports` etc. exists
- # The completion `p` and the input `partial` may not share the same initial
+ # The completion `p` and the region `reg` may not share the same initial
# characters, for instance when completing to subscripts or superscripts.
# So, in general, make sure that the hint starts at the correct position by
# incrementing its starting position by as many characters as the input.
- startind = 1 # index of p from which to start providing the hint
- maxind = ncodeunits(p)
- for _ in partial
- startind = nextind(p, startind)
- startind > maxind && break
- end
+ maxind = lastindex(p)
+ startind = sizeof(content(s, reg))
if startind ≤ maxind # completion on a complete name returns itself so check that there's something to hint
+ # index of p from which to start providing the hint
+ startind = nextind(p, startind)
hint = p[startind:end]
next_key_pressed() && return
@lock s.line_modify_lock begin
@@ -491,7 +494,7 @@ function clear_hint(s::ModeState)
end
function complete_line(s::PromptState, repeats::Int, mod::Module; hint::Bool=false)
- completions, partial, should_complete = complete_line_named(s.p.complete, s, mod; hint)
+ completions, reg, should_complete = complete_line_named(s.p.complete, s, mod; hint)
isempty(completions) && return false
if !should_complete
# should_complete is false for cases where we only want to show
@@ -499,17 +502,16 @@ function complete_line(s::PromptState, repeats::Int, mod::Module; hint::Bool=fal
show_completions(s, completions)
elseif length(completions) == 1
# Replace word by completion
- prev_pos = position(s)
push_undo(s)
- edit_splice!(s, (prev_pos - sizeof(partial)) => prev_pos, completions[1].completion)
+ edit_splice!(s, reg, completions[1].completion)
else
p = common_prefix(completions)
+ partial = content(s, reg.first => min(bufend(s), reg.first + sizeof(p)))
if !isempty(p) && p != partial
# All possible completions share the same prefix, so we might as
- # well complete that
- prev_pos = position(s)
+ # well complete that.
push_undo(s)
- edit_splice!(s, (prev_pos - sizeof(partial)) => prev_pos, p)
+ edit_splice!(s, reg, p)
elseif repeats > 0
show_completions(s, completions)
end
@@ -830,12 +832,12 @@ function edit_move_right(m::MIState)
refresh_line(s)
return true
else
- completions, partial, should_complete = complete_line(s.p.complete, s, m.active_module)
- if should_complete && eof(buf) && length(completions) == 1 && length(partial) > 1
+ completions, reg, should_complete = complete_line(s.p.complete, s, m.active_module)
+ if should_complete && eof(buf) && length(completions) == 1 && reg.second - reg.first > 1
# Replace word by completion
prev_pos = position(s)
push_undo(s)
- edit_splice!(s, (prev_pos - sizeof(partial)) => prev_pos, completions[1])
+ edit_splice!(s, (prev_pos - reg.second + reg.first) => prev_pos, completions[1].completion)
refresh_line(state(s))
return true
else
@@ -2255,12 +2257,12 @@ setmodifiers!(c) = nothing
# Search Mode completions
function complete_line(s::SearchState, repeats, mod::Module; hint::Bool=false)
- completions, partial, should_complete = complete_line(s.histprompt.complete, s, mod; hint)
+ completions, reg, should_complete = complete_line(s.histprompt.complete, s, mod; hint)
# For now only allow exact completions in search mode
if length(completions) == 1
prev_pos = position(s)
push_undo(s)
- edit_splice!(s, (prev_pos - sizeof(partial)) => prev_pos, completions[1])
+ edit_splice!(s, (prev_pos - reg.second + reg.first) => prev_pos, completions[1].completion)
return true
end
return false
diff --git a/stdlib/REPL/src/REPL.jl b/stdlib/REPL/src/REPL.jl
index cc4f4f00cf8f6..66b46154e78f1 100644
--- a/stdlib/REPL/src/REPL.jl
+++ b/stdlib/REPL/src/REPL.jl
@@ -17,7 +17,7 @@ module REPL
Base.Experimental.@optlevel 1
Base.Experimental.@max_methods 1
-function UndefVarError_hint(io::IO, ex::UndefVarError)
+function UndefVarError_REPL_hint(io::IO, ex::UndefVarError)
var = ex.var
if var === :or
print(io, "\nSuggestion: Use `||` for short-circuiting boolean OR.")
@@ -30,67 +30,11 @@ function UndefVarError_hint(io::IO, ex::UndefVarError)
elseif var === :quit
print(io, "\nSuggestion: To exit Julia, use Ctrl-D, or type exit() and press enter.")
end
- if isdefined(ex, :scope)
- scope = ex.scope
- if scope isa Module
- bpart = Base.lookup_binding_partition(ex.world, GlobalRef(scope, var))
- kind = Base.binding_kind(bpart)
- if kind === Base.BINDING_KIND_GLOBAL || kind === Base.BINDING_KIND_UNDEF_CONST || kind == Base.BINDING_KIND_DECLARED
- print(io, "\nSuggestion: add an appropriate import or assignment. This global was declared but not assigned.")
- elseif kind === Base.BINDING_KIND_FAILED
- print(io, "\nHint: It looks like two or more modules export different ",
- "bindings with this name, resulting in ambiguity. Try explicitly ",
- "importing it from a particular module, or qualifying the name ",
- "with the module it should come from.")
- elseif kind === Base.BINDING_KIND_GUARD
- print(io, "\nSuggestion: check for spelling errors or missing imports.")
- elseif Base.is_some_imported(kind)
- print(io, "\nSuggestion: this global was defined as `$(Base.partition_restriction(bpart).globalref)` but not assigned a value.")
- end
- elseif scope === :static_parameter
- print(io, "\nSuggestion: run Test.detect_unbound_args to detect method arguments that do not fully constrain a type parameter.")
- elseif scope === :local
- print(io, "\nSuggestion: check for an assignment to a local variable that shadows a global of the same name.")
- end
- else
- scope = undef
- end
- if scope !== Base && !_UndefVarError_warnfor(io, Base, var)
- warned = false
- for m in Base.loaded_modules_order
- m === Core && continue
- m === Base && continue
- m === Main && continue
- m === scope && continue
- warned |= _UndefVarError_warnfor(io, m, var)
- end
- warned ||
- _UndefVarError_warnfor(io, Core, var) ||
- _UndefVarError_warnfor(io, Main, var)
- end
- return nothing
-end
-
-function _UndefVarError_warnfor(io::IO, m::Module, var::Symbol)
- Base.isbindingresolved(m, var) || return false
- (Base.isexported(m, var) || Base.ispublic(m, var)) || return false
- active_mod = Base.active_module()
- print(io, "\nHint: ")
- if isdefined(active_mod, Symbol(m))
- print(io, "a global variable of this name also exists in $m.")
- else
- if Symbol(m) == var
- print(io, "$m is loaded but not imported in the active module $active_mod.")
- else
- print(io, "a global variable of this name may be made accessible by importing $m in the current active module $active_mod")
- end
- end
- return true
end
function __init__()
Base.REPL_MODULE_REF[] = REPL
- Base.Experimental.register_error_hint(UndefVarError_hint, UndefVarError)
+ Base.Experimental.register_error_hint(UndefVarError_REPL_hint, UndefVarError)
return nothing
end
@@ -142,6 +86,7 @@ import .LineEdit:
PromptState,
mode_idx
+include("SyntaxUtil.jl")
include("REPLCompletions.jl")
using .REPLCompletions
@@ -176,6 +121,22 @@ mutable struct REPLBackend
end
REPLBackend() = REPLBackend(Channel(1), Channel(1), false)
+# A reference to a backend that is not mutable
+struct REPLBackendRef
+ repl_channel::Channel{Any}
+ response_channel::Channel{Any}
+end
+REPLBackendRef(backend::REPLBackend) = REPLBackendRef(backend.repl_channel, backend.response_channel)
+
+function destroy(ref::REPLBackendRef, state::Task)
+ if istaskfailed(state)
+ close(ref.repl_channel, TaskFailedException(state))
+ close(ref.response_channel, TaskFailedException(state))
+ end
+ close(ref.repl_channel)
+ close(ref.response_channel)
+end
+
"""
softscope(ex)
@@ -474,12 +435,23 @@ function repl_backend_loop(backend::REPLBackend, get_module::Function)
while true
tls = task_local_storage()
tls[:SOURCE_PATH] = nothing
- ast, show_value = take!(backend.repl_channel)
+ ast_or_func, show_value = take!(backend.repl_channel)
if show_value == -1
# exit flag
break
end
- eval_user_input(ast, backend, get_module())
+ if show_value == 2 # 2 indicates a function to be called
+ f = ast_or_func
+ try
+ ret = f()
+ put!(backend.response_channel, Pair{Any, Bool}(ret, false))
+ catch err
+ put!(backend.response_channel, Pair{Any, Bool}(err, true))
+ end
+ else
+ ast = ast_or_func
+ eval_user_input(ast, backend, get_module())
+ end
end
return nothing
end
@@ -502,6 +474,8 @@ function Base.showerror(io::IO, e::LimitIOException)
print(io, "$LimitIOException: aborted printing after attempting to print more than $(Base.format_bytes(e.maxbytes)) within a `LimitIO`.")
end
+Base.displaysize(io::LimitIO) = _displaysize(io.io)
+
function Base.write(io::LimitIO, v::UInt8)
io.n > io.maxbytes && throw(LimitIOException(io.maxbytes))
n_bytes = write(io.io, v)
@@ -582,7 +556,7 @@ function print_response(repl::AbstractREPL, response, show_value::Bool, have_col
repl.waserror = response[2]
with_repl_linfo(repl) do io
io = IOContext(io, :module => Base.active_module(repl)::Module)
- print_response(io, response, show_value, have_color, specialdisplay(repl))
+ print_response(io, response, backend(repl), show_value, have_color, specialdisplay(repl))
end
return nothing
end
@@ -599,7 +573,7 @@ function repl_display_error(errio::IO, @nospecialize errval)
return nothing
end
-function print_response(errio::IO, response, show_value::Bool, have_color::Bool, specialdisplay::Union{AbstractDisplay,Nothing}=nothing)
+function print_response(errio::IO, response, backend::Union{REPLBackendRef,Nothing}, show_value::Bool, have_color::Bool, specialdisplay::Union{AbstractDisplay,Nothing}=nothing)
Base.sigatomic_begin()
val, iserr = response
while true
@@ -611,15 +585,19 @@ function print_response(errio::IO, response, show_value::Bool, have_color::Bool,
repl_display_error(errio, val)
else
if val !== nothing && show_value
- try
- if specialdisplay === nothing
+ val2, iserr = if specialdisplay === nothing
+ # display calls may require being run on the main thread
+ eval_with_backend(backend) do
Base.invokelatest(display, val)
- else
+ end
+ else
+ eval_with_backend(backend) do
Base.invokelatest(display, specialdisplay, val)
end
- catch
+ end
+ if iserr
println(errio, "Error showing value of type ", typeof(val), ":")
- rethrow()
+ throw(val2)
end
end
end
@@ -649,21 +627,7 @@ function print_response(errio::IO, response, show_value::Bool, have_color::Bool,
nothing
end
-# A reference to a backend that is not mutable
-struct REPLBackendRef
- repl_channel::Channel{Any}
- response_channel::Channel{Any}
-end
-REPLBackendRef(backend::REPLBackend) = REPLBackendRef(backend.repl_channel, backend.response_channel)
-function destroy(ref::REPLBackendRef, state::Task)
- if istaskfailed(state)
- close(ref.repl_channel, TaskFailedException(state))
- close(ref.response_channel, TaskFailedException(state))
- end
- close(ref.repl_channel)
- close(ref.response_channel)
-end
"""
run_repl(repl::AbstractREPL)
@@ -838,27 +802,29 @@ end
beforecursor(buf::IOBuffer) = String(buf.data[1:buf.ptr-1])
+# Convert inclusive-inclusive 1-based char indexing to inclusive-exclusive byte Region.
+to_region(s, r) = first(r)-1 => (length(r) > 0 ? nextind(s, last(r))-1 : first(r)-1)
+
function complete_line(c::REPLCompletionProvider, s::PromptState, mod::Module; hint::Bool=false)
- partial = beforecursor(s.input_buffer)
full = LineEdit.input_string(s)
- ret, range, should_complete = completions(full, lastindex(partial), mod, c.modifiers.shift, hint)
+ ret, range, should_complete = completions(full, thisind(full, position(s)), mod, c.modifiers.shift, hint)
+ range = to_region(full, range)
c.modifiers = LineEdit.Modifiers()
- return unique!(LineEdit.NamedCompletion[named_completion(x) for x in ret]), partial[range], should_complete
+ return unique!(LineEdit.NamedCompletion[named_completion(x) for x in ret]), range, should_complete
end
function complete_line(c::ShellCompletionProvider, s::PromptState; hint::Bool=false)
- # First parse everything up to the current position
- partial = beforecursor(s.input_buffer)
full = LineEdit.input_string(s)
- ret, range, should_complete = shell_completions(full, lastindex(partial), hint)
- return unique!(LineEdit.NamedCompletion[named_completion(x) for x in ret]), partial[range], should_complete
+ ret, range, should_complete = shell_completions(full, thisind(full, position(s)), hint)
+ range = to_region(full, range)
+ return unique!(LineEdit.NamedCompletion[named_completion(x) for x in ret]), range, should_complete
end
function complete_line(c::LatexCompletions, s; hint::Bool=false)
- partial = beforecursor(LineEdit.buffer(s))
full = LineEdit.input_string(s)::String
- ret, range, should_complete = bslash_completions(full, lastindex(partial), hint)[2]
- return unique!(LineEdit.NamedCompletion[named_completion(x) for x in ret]), partial[range], should_complete
+ ret, range, should_complete = bslash_completions(full, thisind(full, position(s)), hint)[2]
+ range = to_region(full, range)
+ return unique!(LineEdit.NamedCompletion[named_completion(x) for x in ret]), range, should_complete
end
with_repl_linfo(f, repl) = f(outstream(repl))
@@ -1184,12 +1150,27 @@ find_hist_file() = get(ENV, "JULIA_HISTORY",
!isempty(DEPOT_PATH) ? joinpath(DEPOT_PATH[1], "logs", "repl_history.jl") :
error("DEPOT_PATH is empty and ENV[\"JULIA_HISTORY\"] not set."))
-backend(r::AbstractREPL) = r.backendref
+backend(r::AbstractREPL) = hasproperty(r, :backendref) ? r.backendref : nothing
+
-function eval_with_backend(ast, backend::REPLBackendRef)
- put!(backend.repl_channel, (ast, 1))
+function eval_with_backend(ast::Expr, backend::REPLBackendRef)
+    put!(backend.repl_channel, (ast, 1)) # (ast, show_value)
return take!(backend.response_channel) # (val, iserr)
end
+function eval_with_backend(f, backend::REPLBackendRef)
+ put!(backend.repl_channel, (f, 2)) # (f, show_value) 2 indicates function (rather than ast)
+ return take!(backend.response_channel) # (val, iserr)
+end
+# if no backend just eval (used by tests)
+function eval_with_backend(f, backend::Nothing)
+ try
+ ret = f()
+ return (ret, false) # (val, iserr)
+ catch err
+ return (err, true)
+ end
+end
+
function respond(f, repl, main; pass_empty::Bool = false, suppress_on_semicolon::Bool = true)
return function do_respond(s::MIState, buf, ok::Bool)
diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl
index eadc2672dd29b..5b211d95b5385 100644
--- a/stdlib/REPL/src/REPLCompletions.jl
+++ b/stdlib/REPL/src/REPLCompletions.jl
@@ -12,8 +12,10 @@ const CC = Base.Compiler
using Base.Meta
using Base: propertynames, something, IdSet
using Base.Filesystem: _readdirx
+using Base.JuliaSyntax: @K_str, @KSet_str, parseall, byte_range, children, is_prefix_call, is_trivia, kind
using ..REPL.LineEdit: NamedCompletion
+using ..REPL.SyntaxUtil: CursorNode, find_parent, seek_pos, char_range, char_last, children_nt, find_delim
abstract type Completion end
@@ -240,8 +242,7 @@ function complete_symbol!(suggestions::Vector{Completion},
return suggestions
end
-completes_module(mod::Module, x::Symbol) =
- Base.isbindingresolved(mod, x) && isdefined(mod, x) && isa(getglobal(mod, x), Module)
+completes_module(mod::Module, x::Symbol) = isdefined(mod, x) && isa(getglobal(mod, x), Module)
function add_field_completions!(suggestions::Vector{Completion}, name::String, @nospecialize(t))
if isa(t, Union)
@@ -303,9 +304,8 @@ const sorted_keyvals = ["false", "true"]
complete_keyval!(suggestions::Vector{Completion}, s::String) =
complete_from_list!(suggestions, KeyvalCompletion, sorted_keyvals, s)
-function do_raw_escape(s)
- # escape_raw_string with delim='`' and ignoring the rule for the ending \
- return replace(s, r"(\\+)`" => s"\1\\`")
+function do_cmd_escape(s)
+ return Base.escape_raw_string(Base.shell_escape_posixly(s), '`')
end
function do_shell_escape(s)
return Base.shell_escape_posixly(s)
@@ -313,6 +313,20 @@ end
function do_string_escape(s)
return escape_string(s, ('\"','$'))
end
+function do_string_unescape(s)
+ s = replace(s, "\\\$"=>"\$")
+ try
+ unescape_string(s)
+ catch e
+ e isa ArgumentError || rethrow()
+ s # it is unlikely, but if it isn't a valid string, maybe it was a valid path, and just needs escape_string called?
+ end
+end
+
+function joinpath_withsep(dir, path; dirsep)
+ dir == "" && return path
+ dir[end] == dirsep ? dir * path : dir * dirsep * path
+end
const PATH_cache_lock = Base.ReentrantLock()
const PATH_cache = Set{String}()
@@ -410,9 +424,10 @@ end
function complete_path(path::AbstractString;
use_envpath=false,
shell_escape=false,
- raw_escape=false,
+ cmd_escape=false,
string_escape=false,
- contract_user=false)
+ contract_user=false,
+ dirsep=Sys.iswindows() ? '\\' : '/')
@assert !(shell_escape && string_escape)
if Base.Sys.isunix() && occursin(r"^~(?:/|$)", path)
# if the path is just "~", don't consider the expanded username as a prefix
@@ -441,7 +456,7 @@ function complete_path(path::AbstractString;
for entry in entries
if startswith(entry.name, prefix)
is_dir = try isdir(entry) catch ex; ex isa Base.IOError ? false : rethrow() end
- push!(matches, is_dir ? entry.name * "/" : entry.name)
+ push!(matches, is_dir ? joinpath_withsep(entry.name, ""; dirsep) : entry.name)
end
end
@@ -457,7 +472,7 @@ function complete_path(path::AbstractString;
end
matches = ((shell_escape ? do_shell_escape(s) : string_escape ? do_string_escape(s) : s) for s in matches)
- matches = ((raw_escape ? do_raw_escape(s) : s) for s in matches)
+ matches = ((cmd_escape ? do_cmd_escape(s) : s) for s in matches)
matches = Completion[PathCompletion(contract_user ? contractuser(s) : s) for s in matches]
return matches, dir, !isempty(matches)
end
@@ -470,7 +485,8 @@ function complete_path(path::AbstractString,
contract_user=false)
## TODO: enable this depwarn once Pkg is fixed
#Base.depwarn("complete_path with pos argument is deprecated because the return value [2] is incorrect to use", :complete_path)
- paths, dir, success = complete_path(path; use_envpath, shell_escape, string_escape)
+ paths, dir, success = complete_path(path; use_envpath, shell_escape, string_escape, dirsep='/')
+
if Base.Sys.isunix() && occursin(r"^~(?:/|$)", path)
# if the path is just "~", don't consider the expanded username as a prefix
if path == "~"
@@ -491,91 +507,6 @@ function complete_path(path::AbstractString,
return paths, startpos:pos, success
end
-function complete_expanduser(path::AbstractString, r)
- expanded =
- try expanduser(path)
- catch e
- e isa ArgumentError || rethrow()
- path
- end
- return Completion[PathCompletion(expanded)], r, path != expanded
-end
-
-# Returns a range that includes the method name in front of the first non
-# closed start brace from the end of the string.
-function find_start_brace(s::AbstractString; c_start='(', c_end=')')
- r = reverse(s)
- i = firstindex(r)
- braces = in_comment = 0
- in_single_quotes = in_double_quotes = in_back_ticks = false
- num_single_quotes_in_string = count('\'', s)
- while i <= ncodeunits(r)
- c, i = iterate(r, i)
- if c == '#' && i <= ncodeunits(r) && iterate(r, i)[1] == '='
- c, i = iterate(r, i) # consume '='
- new_comments = 1
- # handle #=#=#=#, by counting =# pairs
- while i <= ncodeunits(r) && iterate(r, i)[1] == '#'
- c, i = iterate(r, i) # consume '#'
- iterate(r, i)[1] == '=' || break
- c, i = iterate(r, i) # consume '='
- new_comments += 1
- end
- if c == '='
- in_comment += new_comments
- else
- in_comment -= new_comments
- end
- elseif !in_single_quotes && !in_double_quotes && !in_back_ticks && in_comment == 0
- if c == c_start
- braces += 1
- elseif c == c_end
- braces -= 1
- elseif c == '\'' && num_single_quotes_in_string % 2 == 0
- # ' can be a transpose too, so check if there are even number of 's in the string
- # TODO: This probably needs to be more robust
- in_single_quotes = true
- elseif c == '"'
- in_double_quotes = true
- elseif c == '`'
- in_back_ticks = true
- end
- else
- if in_single_quotes &&
- c == '\'' && i <= ncodeunits(r) && iterate(r, i)[1] != '\\'
- in_single_quotes = false
- elseif in_double_quotes &&
- c == '"' && i <= ncodeunits(r) && iterate(r, i)[1] != '\\'
- in_double_quotes = false
- elseif in_back_ticks &&
- c == '`' && i <= ncodeunits(r) && iterate(r, i)[1] != '\\'
- in_back_ticks = false
- elseif in_comment > 0 &&
- c == '=' && i <= ncodeunits(r) && iterate(r, i)[1] == '#'
- # handle =#=#=#=, by counting #= pairs
- c, i = iterate(r, i) # consume '#'
- old_comments = 1
- while i <= ncodeunits(r) && iterate(r, i)[1] == '='
- c, i = iterate(r, i) # consume '='
- iterate(r, i)[1] == '#' || break
- c, i = iterate(r, i) # consume '#'
- old_comments += 1
- end
- if c == '#'
- in_comment -= old_comments
- else
- in_comment += old_comments
- end
- end
- end
- braces == 1 && break
- end
- braces != 1 && return 0:-1, -1
- method_name_end = reverseind(s, i)
- startind = nextind(s, something(findprev(in(non_identifier_chars), s, method_name_end), 0))::Int
- return (startind:lastindex(s), method_name_end)
-end
-
struct REPLCacheToken end
struct REPLInterpreter <: CC.AbstractInterpreter
@@ -643,20 +574,17 @@ end
function CC.abstract_eval_globalref(interp::REPLInterpreter, g::GlobalRef, bailed::Bool,
sv::CC.InferenceState)
# Ignore saw_latestworld
- partition = CC.abstract_eval_binding_partition!(interp, g, sv)
if (interp.limit_aggressive_inference ? is_repl_frame(sv) : is_call_graph_uncached(sv))
+ partition = CC.abstract_eval_binding_partition!(interp, g, sv)
if CC.is_defined_const_binding(CC.binding_kind(partition))
- return Pair{CC.RTEffects, Union{Nothing, Core.BindingPartition}}(
- CC.RTEffects(Const(CC.partition_restriction(partition)), Union{}, CC.EFFECTS_TOTAL), partition)
+ return CC.RTEffects(Const(CC.partition_restriction(partition)), Union{}, CC.EFFECTS_TOTAL)
else
b = convert(Core.Binding, g)
- if CC.binding_kind(partition) == CC.BINDING_KIND_GLOBAL && isdefined(b, :value)
- return Pair{CC.RTEffects, Union{Nothing, Core.BindingPartition}}(
- CC.RTEffects(Const(b.value), Union{}, CC.EFFECTS_TOTAL), partition)
+ if CC.binding_kind(partition) == CC.PARTITION_KIND_GLOBAL && isdefined(b, :value)
+ return CC.RTEffects(Const(b.value), Union{}, CC.EFFECTS_TOTAL)
end
end
- return Pair{CC.RTEffects, Union{Nothing, Core.BindingPartition}}(
- CC.RTEffects(Union{}, UndefVarError, CC.EFFECTS_THROWS), partition)
+ return CC.RTEffects(Union{}, UndefVarError, CC.EFFECTS_THROWS)
end
return @invoke CC.abstract_eval_globalref(interp::CC.AbstractInterpreter, g::GlobalRef, bailed::Bool,
sv::CC.InferenceState)
@@ -733,6 +661,7 @@ end
# lower `ex` and run type inference on the resulting top-level expression
function repl_eval_ex(@nospecialize(ex), context_module::Module; limit_aggressive_inference::Bool=false)
+ expr_has_error(ex) && return nothing
if (isexpr(ex, :toplevel) || isexpr(ex, :tuple)) && !isempty(ex.args)
# get the inference result for the last expression
ex = ex.args[end]
@@ -782,6 +711,7 @@ code_typed(CC.typeinf, (REPLInterpreter, CC.InferenceState))
# Method completion on function call expression that look like :(max(1))
MAX_METHOD_COMPLETIONS::Int = 40
function _complete_methods(ex_org::Expr, context_module::Module, shift::Bool)
+ isempty(ex_org.args) && return 2, nothing, [], Set{Symbol}()
funct = repl_eval_ex(ex_org.args[1], context_module)
funct === nothing && return 2, nothing, [], Set{Symbol}()
funct = CC.widenconst(funct)
@@ -799,24 +729,12 @@ function complete_methods(ex_org::Expr, context_module::Module=Main, shift::Bool
end
MAX_ANY_METHOD_COMPLETIONS::Int = 10
-function recursive_explore_names!(seen::IdSet, callee_module::Module, initial_module::Module, exploredmodules::IdSet{Module}=IdSet{Module}())
- push!(exploredmodules, callee_module)
- for name in names(callee_module; all=true, imported=true)
- if !Base.isdeprecated(callee_module, name) && !startswith(string(name), '#') && isdefined(initial_module, name)
- func = getfield(callee_module, name)
- if !isa(func, Module)
- funct = Core.Typeof(func)
- push!(seen, funct)
- elseif isa(func, Module) && func ∉ exploredmodules
- recursive_explore_names!(seen, func, initial_module, exploredmodules)
- end
- end
- end
-end
-function recursive_explore_names(callee_module::Module, initial_module::Module)
- seen = IdSet{Any}()
- recursive_explore_names!(seen, callee_module, initial_module)
- seen
+
+function accessible(mod::Module, private::Bool)
+ bindings = IdSet{Any}(Core.Typeof(getglobal(mod, s)) for s in names(mod; all=private, imported=private, usings=private)
+ if !Base.isdeprecated(mod, s) && !startswith(string(s), '#') && !startswith(string(s), '@') && isdefined(mod, s))
+ delete!(bindings, Module)
+ return collect(bindings)
end
function complete_any_methods(ex_org::Expr, callee_module::Module, context_module::Module, moreargs::Bool, shift::Bool)
@@ -834,7 +752,7 @@ function complete_any_methods(ex_org::Expr, callee_module::Module, context_modul
# semicolon for the ".?(" syntax
moreargs && push!(args_ex, Vararg{Any})
- for seen_name in recursive_explore_names(callee_module, callee_module)
+ for seen_name in accessible(callee_module, callee_module === context_module)
complete_methods!(out, seen_name, args_ex, kwargs_ex, MAX_ANY_METHOD_COMPLETIONS, false)
end
@@ -939,50 +857,6 @@ const subscript_regex = Regex("^\\\\_[" * join(isdigit(k) || isletter(k) ? "$k"
const superscripts = Dict(k[3]=>v[1] for (k,v) in latex_symbols if startswith(k, "\\^") && length(k)==3)
const superscript_regex = Regex("^\\\\\\^[" * join(isdigit(k) || isletter(k) ? "$k" : "\\$k" for k in keys(superscripts)) * "]+\\z")
-# Aux function to detect whether we're right after a using or import keyword
-function get_import_mode(s::String)
- # allow all of these to start with leading whitespace and macros like @eval and @eval(
- # ^\s*(?:@\w+\s*(?:\(\s*)?)?
-
- # match simple cases like `using |` and `import |`
- mod_import_match_simple = match(r"^\s*(?:@\w+\s*(?:\(\s*)?)?\b(using|import)\s*$", s)
- if mod_import_match_simple !== nothing
- if mod_import_match_simple[1] == "using"
- return :using_module
- else
- return :import_module
- end
- end
- # match module import statements like `using Foo|`, `import Foo, Bar|` and `using Foo.Bar, Baz, |`
- mod_import_match = match(r"^\s*(?:@\w+\s*(?:\(\s*)?)?\b(using|import)\s+([\w\.]+(?:\s*,\s*[\w\.]+)*),?\s*$", s)
- if mod_import_match !== nothing
- if mod_import_match.captures[1] == "using"
- return :using_module
- else
- return :import_module
- end
- end
- # now match explicit name import statements like `using Foo: |` and `import Foo: bar, baz|`
- name_import_match = match(r"^\s*(?:@\w+\s*(?:\(\s*)?)?\b(using|import)\s+([\w\.]+)\s*:\s*([\w@!\s,]+)$", s)
- if name_import_match !== nothing
- if name_import_match[1] == "using"
- return :using_name
- else
- return :import_name
- end
- end
- return nothing
-end
-
-function close_path_completion(dir, path, str, pos)
- path = unescape_string(replace(path, "\\\$"=>"\$"))
- path = joinpath(dir, path)
- # ...except if it's a directory...
- Base.isaccessibledir(path) && return false
- # ...and except if there's already a " at the cursor.
- return lastindex(str) <= pos || str[nextind(str, pos)] != '"'
-end
-
function bslash_completions(string::String, pos::Int, hint::Bool=false)
slashpos = something(findprev(isequal('\\'), string, pos), 0)
if (something(findprev(in(bslash_separators), string, pos), 0) < slashpos &&
@@ -1010,29 +884,7 @@ function bslash_completions(string::String, pos::Int, hint::Bool=false)
completions = Completion[BslashCompletion(name, "$(symbol_dict[name]) $name") for name in sort!(collect(namelist))]
return (true, (completions, slashpos:pos, true))
end
- return (false, (Completion[], 0:-1, false))
-end
-
-function dict_identifier_key(str::String, tag::Symbol, context_module::Module=Main)
- if tag === :string
- str_close = str*"\""
- elseif tag === :cmd
- str_close = str*"`"
- else
- str_close = str
- end
- frange, end_of_identifier = find_start_brace(str_close, c_start='[', c_end=']')
- isempty(frange) && return (nothing, nothing, nothing)
- objstr = str[1:end_of_identifier]
- objex = Meta.parse(objstr, raise=false, depwarn=false)
- objt = repl_eval_ex(objex, context_module)
- isa(objt, Core.Const) || return (nothing, nothing, nothing)
- obj = objt.val
- isa(obj, AbstractDict) || return (nothing, nothing, nothing)
- (Base.haslength(obj) && length(obj)::Int < 1_000_000) || return (nothing, nothing, nothing)
- begin_of_key = something(findnext(!isspace, str, nextind(str, end_of_identifier) + 1), # +1 for [
- lastindex(str)+1)
- return (obj, str[begin_of_key:end], begin_of_key)
+ return (false, (Completion[], 1:0, false))
end
# This needs to be a separate non-inlined function, see #19441
@@ -1045,45 +897,12 @@ end
return matches
end
-# Identify an argument being completed in a method call. If the argument is empty, method
-# suggestions will be provided instead of argument completions.
-function identify_possible_method_completion(partial, last_idx)
- fail = 0:-1, Expr(:nothing), 0:-1, 0
-
- # First, check that the last punctuation is either ',', ';' or '('
- idx_last_punct = something(findprev(x -> ispunct(x) && x != '_' && x != '!', partial, last_idx), 0)::Int
- idx_last_punct == 0 && return fail
- last_punct = partial[idx_last_punct]
- last_punct == ',' || last_punct == ';' || last_punct == '(' || return fail
-
- # Then, check that `last_punct` is only followed by an identifier or nothing
- before_last_word_start = something(findprev(in(non_identifier_chars), partial, last_idx), 0)
- before_last_word_start == 0 && return fail
- all(isspace, @view partial[nextind(partial, idx_last_punct):before_last_word_start]) || return fail
-
- # Check that `last_punct` is either the last '(' or placed after a previous '('
- frange, method_name_end = find_start_brace(@view partial[1:idx_last_punct])
- method_name_end ∈ frange || return fail
-
- # Strip the preceding ! operators, if any, and close the expression with a ')'
- s = replace(partial[frange], r"\G\!+([^=\(]+)" => s"\1"; count=1) * ')'
- ex = Meta.parse(s, raise=false, depwarn=false)
- isa(ex, Expr) || return fail
-
- # `wordrange` is the position of the last argument to complete
- wordrange = nextind(partial, before_last_word_start):last_idx
- return frange, ex, wordrange, method_name_end
-end
-
# Provide completion for keyword arguments in function calls
-function complete_keyword_argument(partial::String, last_idx::Int, context_module::Module;
- shift::Bool=false)
- frange, ex, wordrange, = identify_possible_method_completion(partial, last_idx)
- fail = Completion[], 0:-1, frange
- ex.head === :call || is_broadcasting_expr(ex) || return fail
-
+function complete_keyword_argument!(suggestions::Vector{Completion},
+ ex::Expr, last_word::String,
+ context_module::Module; shift::Bool=false)
kwargs_flag, funct, args_ex, kwargs_ex = _complete_methods(ex, context_module, true)::Tuple{Int, Any, Vector{Any}, Set{Symbol}}
- kwargs_flag == 2 && return fail # one of the previous kwargs is invalid
+    kwargs_flag == 2 && return false # one of the previous kwargs is invalid
methods = Completion[]
complete_methods!(methods, funct, Any[Vararg{Any}], kwargs_ex, shift ? -1 : MAX_METHOD_COMPLETIONS, kwargs_flag == 1)
@@ -1095,7 +914,6 @@ function complete_keyword_argument(partial::String, last_idx::Int, context_modul
# previously in the expression. The corresponding suggestion is "kwname=".
# If the keyword corresponds to an existing name, also include "kwname" as a suggestion
# since the syntax "foo(; kwname)" is equivalent to "foo(; kwname=kwname)".
- last_word = partial[wordrange] # the word to complete
kwargs = Set{String}()
for m in methods
# if MAX_METHOD_COMPLETIONS is hit a single TextCompletion is return by complete_methods! with an explanation
@@ -1106,22 +924,18 @@ function complete_keyword_argument(partial::String, last_idx::Int, context_modul
current_kwarg_candidates = String[]
for _kw in possible_kwargs
kw = String(_kw)
- if !endswith(kw, "...") && startswith(kw, last_word) && _kw ∉ kwargs_ex
+ # HACK: Should consider removing current arg from AST.
+ if !endswith(kw, "...") && startswith(kw, last_word) && (_kw ∉ kwargs_ex || kw == last_word)
push!(current_kwarg_candidates, kw)
end
end
union!(kwargs, current_kwarg_candidates)
end
- suggestions = Completion[KeywordArgumentCompletion(kwarg) for kwarg in kwargs]
-
- # Only add these if not in kwarg space. i.e. not in `foo(; `
- if kwargs_flag == 0
- complete_symbol!(suggestions, #=prefix=#nothing, last_word, context_module; shift)
- complete_keyval!(suggestions, last_word)
+ for kwarg in kwargs
+ push!(suggestions, KeywordArgumentCompletion(kwarg))
end
-
- return sort!(suggestions, by=named_completion_completion), wordrange
+ return kwargs_flag != 0
end
function get_loading_candidates(pkgstarts::String, project_file::String)
@@ -1140,411 +954,368 @@ function get_loading_candidates(pkgstarts::String, project_file::String)
return loading_candidates
end
-function complete_loading_candidates!(suggestions::Vector{Completion}, pkgstarts::String, project_file::String)
- for name in get_loading_candidates(pkgstarts, project_file)
- push!(suggestions, PackageCompletion(name))
+function complete_loading_candidates!(suggestions::Vector{Completion}, s::String)
+ for name in ("Core", "Base")
+ startswith(name, s) && push!(suggestions, PackageCompletion(name))
end
- return suggestions
-end
-function complete_identifiers!(suggestions::Vector{Completion},
- context_module::Module, string::String, name::String,
- pos::Int, separatorpos::Int, startpos::Int;
- comp_keywords::Bool=false,
- complete_modules_only::Bool=false,
- shift::Bool=false)
- if comp_keywords
- complete_keyword!(suggestions, name)
- complete_keyval!(suggestions, name)
- end
- if separatorpos > 1 && (string[separatorpos] == '.' || string[separatorpos] == ':')
- s = string[1:prevind(string, separatorpos)]
- # First see if the whole string up to `pos` is a valid expression. If so, use it.
- prefix = Meta.parse(s, raise=false, depwarn=false)
- if isexpr(prefix, :incomplete)
- s = string[startpos:pos]
- # Heuristic to find the start of the expression. TODO: This would be better
- # done with a proper error-recovering parser.
- if 0 < startpos <= lastindex(string) && string[startpos] == '.'
- i = prevind(string, startpos)
- while 0 < i
- c = string[i]
- if c in (')', ']')
- if c == ')'
- c_start = '('
- c_end = ')'
- elseif c == ']'
- c_start = '['
- c_end = ']'
- end
- frange, end_of_identifier = find_start_brace(string[1:prevind(string, i)], c_start=c_start, c_end=c_end)
- isempty(frange) && break # unbalanced parens
- startpos = first(frange)
- i = prevind(string, startpos)
- elseif c in ('\'', '\"', '\`')
- s = "$c$c"*string[startpos:pos]
- break
+ # If there's no dot, we're in toplevel, so we should
+ # also search for packages
+ for dir in Base.load_path()
+ if basename(dir) in Base.project_names && isfile(dir)
+ for name in get_loading_candidates(s, dir)
+ push!(suggestions, PackageCompletion(name))
+ end
+ end
+ isdir(dir) || continue
+ for entry in _readdirx(dir)
+ pname = entry.name
+ if pname[1] != '.' && pname != "METADATA" &&
+ pname != "REQUIRE" && startswith(pname, s)
+ # Valid file paths are
+ # .jl
+ # /src/.jl
+ # .jl/src/.jl
+ if isfile(entry)
+ endswith(pname, ".jl") && push!(suggestions,
+ PackageCompletion(pname[1:prevind(pname, end-2)]))
+ else
+ mod_name = if endswith(pname, ".jl")
+ pname[1:prevind(pname, end-2)]
else
- break
+ pname
end
- s = string[startpos:pos]
- end
- end
- if something(findlast(in(non_identifier_chars), s), 0) < something(findlast(isequal('.'), s), 0)
- lookup_name, name = rsplit(s, ".", limit=2)
- name = String(name)
- prefix = Meta.parse(lookup_name, raise=false, depwarn=false)
- end
- isexpr(prefix, :incomplete) && (prefix = nothing)
- elseif isexpr(prefix, (:using, :import))
- arglast = prefix.args[end] # focus on completion to the last argument
- if isexpr(arglast, :.)
- # We come here for cases like:
- # - `string`: "using Mod1.Mod2.M"
- # - `ex`: :(using Mod1.Mod2)
- # - `name`: "M"
- # Now we transform `ex` to `:(Mod1.Mod2)` to allow `complete_symbol!` to
- # complete for inner modules whose name starts with `M`.
- # Note that `complete_modules_only=true` is set within `completions`
- prefix = nothing
- firstdot = true
- for arg = arglast.args
- if arg === :.
- # override `context_module` if multiple `.` accessors are used
- if firstdot
- firstdot = false
- else
- context_module = parentmodule(context_module)
- end
- elseif arg isa Symbol
- if prefix === nothing
- prefix = arg
- else
- prefix = Expr(:., prefix, QuoteNode(arg))
- end
- else # invalid expression
- prefix = nothing
- break
+ if isfile(joinpath(entry, "src",
+ "$mod_name.jl"))
+ push!(suggestions, PackageCompletion(mod_name))
end
end
end
- elseif isexpr(prefix, :call) && length(prefix.args) > 1
- isinfix = s[end] != ')'
- # A complete call expression that does not finish with ')' is an infix call.
- if !isinfix
- # Handle infix call argument completion of the form bar + foo(qux).
- frange, end_of_identifier = find_start_brace(@view s[1:prevind(s, end)])
- if !isempty(frange) # if find_start_brace fails to find the brace just continue
- isinfix = Meta.parse(@view(s[frange[1]:end]), raise=false, depwarn=false) == prefix.args[end]
- end
- end
- if isinfix
- prefix = prefix.args[end]
- end
- elseif isexpr(prefix, :macrocall) && length(prefix.args) > 1
- # allow symbol completions within potentially incomplete macrocalls
- if s[end] ≠ '`' && s[end] ≠ ')'
- prefix = prefix.args[end]
- end
end
- else
- prefix = nothing
end
- complete_symbol!(suggestions, prefix, name, context_module; complete_modules_only, shift)
- return suggestions
end
function completions(string::String, pos::Int, context_module::Module=Main, shift::Bool=true, hint::Bool=false)
- # First parse everything up to the current position
- partial = string[1:pos]
- inc_tag = Base.incomplete_tag(Meta.parse(partial, raise=false, depwarn=false))
-
- if !hint # require a tab press for completion of these
- # ?(x, y)TAB lists methods you can call with these objects
- # ?(x, y TAB lists methods that take these objects as the first two arguments
- # MyModule.?(x, y)TAB restricts the search to names in MyModule
- rexm = match(r"(\w+\.|)\?\((.*)$", partial)
- if rexm !== nothing
- # Get the module scope
- if isempty(rexm.captures[1])
- callee_module = context_module
- else
- modname = Symbol(rexm.captures[1][1:end-1])
- if isdefined(context_module, modname)
- callee_module = getfield(context_module, modname)
- if !isa(callee_module, Module)
- callee_module = context_module
- end
- else
- callee_module = context_module
- end
- end
- moreargs = !endswith(rexm.captures[2], ')')
- callstr = "_(" * rexm.captures[2]
- if moreargs
- callstr *= ')'
- end
- ex_org = Meta.parse(callstr, raise=false, depwarn=false)
- if isa(ex_org, Expr)
- return complete_any_methods(ex_org, callee_module::Module, context_module, moreargs, shift), (0:length(rexm.captures[1])+1) .+ rexm.offset, false
- end
- end
- end
+    # filename needs to be a string so the macro can be evaluated
+ node = parseall(CursorNode, string, ignore_errors=true, keep_parens=true, filename="none")
+ cur = @something seek_pos(node, pos) node
- # if completing a key in a Dict
- identifier, partial_key, loc = dict_identifier_key(partial, inc_tag, context_module)
- if identifier !== nothing
- matches = find_dict_matches(identifier, partial_key)
- length(matches)==1 && (lastindex(string) <= pos || string[nextind(string,pos)] != ']') && (matches[1]*=']')
- length(matches)>0 && return Completion[DictCompletion(identifier, match) for match in sort!(matches)], loc::Int:pos, true
- end
+ # Back up before whitespace to get a more useful AST node.
+ pos_not_ws = findprev(!isspace, string, pos)
+ cur_not_ws = something(seek_pos(node, pos_not_ws), node)
suggestions = Completion[]
+ sort_suggestions() = sort!(unique!(named_completion, suggestions), by=named_completion_completion)
+
+ # Search for methods (requires tab press):
+ # ?(x, y)TAB lists methods you can call with these objects
+ # ?(x, y TAB lists methods that take these objects as the first two arguments
+ # MyModule.?(x, y)TAB restricts the search to names in MyModule
+ if !hint
+ cs = method_search(view(string, 1:pos), context_module, shift)
+ cs !== nothing && return cs
+ end
- # Check if this is a var"" string macro that should be completed like
- # an identifier rather than a string.
- # TODO: It would be nice for the parser to give us more information here
- # so that we can lookup the macro by identity rather than pattern matching
- # its invocation.
- varrange = findprev("var\"", string, pos)
-
- expanded = nothing
- was_expanded = false
-
- if varrange !== nothing
- ok, ret = bslash_completions(string, pos)
- ok && return ret
- startpos = first(varrange) + 4
- separatorpos = something(findprev(isequal('.'), string, first(varrange)-1), 0)
- name = string[startpos:pos]
- complete_identifiers!(suggestions, context_module, string, name,
- pos, separatorpos, startpos;
- shift)
- return sort!(unique!(named_completion, suggestions), by=named_completion_completion), (separatorpos+1):pos, true
- elseif inc_tag === :cmd
- # TODO: should this call shell_completions instead of partially reimplementing it?
- let m = match(r"[\t\n\r\"`><=*?|]| (?!\\)", reverse(partial)) # fuzzy shell_parse in reverse
- startpos = nextind(partial, reverseind(partial, m.offset))
- r = startpos:pos
- scs::String = string[r]
-
- expanded = complete_expanduser(scs, r)
- was_expanded = expanded[3]
- if was_expanded
- scs = (only(expanded[1])::PathCompletion).path
- # If tab press, ispath and user expansion available, return it now
- # otherwise see if we can complete the path further before returning with expanded ~
- !hint && ispath(scs) && return expanded::Completions
- end
-
- path::String = replace(scs, r"(\\+)\g1(\\?)`" => "\1\2`") # fuzzy unescape_raw_string: match an even number of \ before ` and replace with half as many
- # This expansion with "\\ "=>' ' replacement and shell_escape=true
- # assumes the path isn't further quoted within the cmd backticks.
- path = replace(path, r"\\ " => " ", r"\$" => "\$") # fuzzy shell_parse (reversed by shell_escape_posixly)
- paths, dir, success = complete_path(path, shell_escape=true, raw_escape=true)
-
- if success && !isempty(dir)
- let dir = do_raw_escape(do_shell_escape(dir))
- # if escaping of dir matches scs prefix, remove that from the completions
- # otherwise make it the whole completion
- if endswith(dir, "/") && startswith(scs, dir)
- r = (startpos + sizeof(dir)):pos
- elseif startswith(scs, dir * "/")
- r = nextind(string, startpos + sizeof(dir)):pos
- else
- map!(paths, paths) do c::PathCompletion
- p = dir * "/" * c.path
- was_expanded && (p = contractuser(p))
- return PathCompletion(p)
- end
- end
- end
- end
- if isempty(paths) && !hint && was_expanded
- # if not able to provide completions, not hinting, and ~ expansion was possible, return ~ expansion
- return expanded::Completions
- else
- return sort!(paths, by=p->p.path), r::UnitRange{Int}, success
+ # Complete keys in a Dict:
+ # my_dict[ TAB
+ n, key, closed = find_ref_key(cur_not_ws, pos)
+ if n !== nothing
+ key::UnitRange{Int}
+ obj = dict_eval(Expr(n), context_module)
+ if obj !== nothing
+ # Skip leading whitespace inside brackets.
+ i = @something findnext(!isspace, string, first(key)) nextind(string, last(key))
+ key = i:last(key)
+ s = string[intersect(key, 1:pos)]
+ matches = find_dict_matches(obj, s)
+ length(matches) == 1 && !closed && (matches[1] *= ']')
+ if length(matches) > 0
+ ret = Completion[DictCompletion(obj, match) for match in sort!(matches)]
+ return ret, key, true
end
end
- elseif inc_tag === :string
- # Find first non-escaped quote
- let m = match(r"\"(?!\\)", reverse(partial))
- startpos = nextind(partial, reverseind(partial, m.offset))
- r = startpos:pos
- scs::String = string[r]
-
- expanded = complete_expanduser(scs, r)
- was_expanded = expanded[3]
- if was_expanded
- scs = (only(expanded[1])::PathCompletion).path
- # If tab press, ispath and user expansion available, return it now
- # otherwise see if we can complete the path further before returning with expanded ~
- !hint && ispath(scs) && return expanded::Completions
- end
-
- path = try
- unescape_string(replace(scs, "\\\$"=>"\$"))
- catch ex
- ex isa ArgumentError || rethrow()
- nothing
- end
- if !isnothing(path)
- paths, dir, success = complete_path(path::String, string_escape=true)
-
- if length(paths) == 1
- p = (paths[1]::PathCompletion).path
- hint && was_expanded && (p = contractuser(p))
- if close_path_completion(dir, p, path, pos)
- paths[1] = PathCompletion(p * "\"")
- end
- end
+ end
- if success && !isempty(dir)
- let dir = do_string_escape(dir)
- # if escaping of dir matches scs prefix, remove that from the completions
- # otherwise make it the whole completion
- if endswith(dir, "/") && startswith(scs, dir)
- r = (startpos + sizeof(dir)):pos
- elseif startswith(scs, dir * "/") && dir != dirname(homedir())
- was_expanded && (dir = contractuser(dir))
- r = nextind(string, startpos + sizeof(dir)):pos
- else
- map!(paths, paths) do c::PathCompletion
- p = dir * "/" * c.path
- hint && was_expanded && (p = contractuser(p))
- return PathCompletion(p)
- end
- end
- end
- end
+ # Complete Cmd strings:
+ # `fil TAB => `file
+ # `file ~/exa TAB => `file ~/example.txt
+ # `file ~/example.txt TAB => `file /home/user/example.txt
+ if (n = find_parent(cur, K"CmdString")) !== nothing
+ off = n.position - 1
+ ret, r, success = shell_completions(string[char_range(n)], pos - off, hint, cmd_escape=true)
+ success && return ret, r .+ off, success
+ end
- # Fallthrough allowed so that Latex symbols can be completed in strings
- if success
- return sort!(paths, by=p->p.path), r::UnitRange{Int}, success
- elseif !hint && was_expanded
- # if not able to provide completions, not hinting, and ~ expansion was possible, return ~ expansion
- return expanded::Completions
- end
- end
+ # Complete ordinary strings:
+ # "~/exa TAB => "~/example.txt"
+ # "~/example.txt TAB => "/home/user/example.txt"
+ r, closed = find_str(cur)
+ if r !== nothing
+ s = do_string_unescape(string[r])
+ ret, success = complete_path_string(s, hint; string_escape=true,
+ dirsep=Sys.iswindows() ? '\\' : '/')
+ if length(ret) == 1 && !closed && close_path_completion(ret[1].path)
+ ret[1] = PathCompletion(ret[1].path * '"')
end
+ success && return ret, r, success
end
- # if path has ~ and we didn't find any paths to complete just return the expanded path
- was_expanded && return expanded::Completions
+    # Backslash symbols:
+ # \pi => π
+ # Comes after string completion so backslash escapes are not misinterpreted.
ok, ret = bslash_completions(string, pos)
ok && return ret
- # Make sure that only bslash_completions is working on strings
- inc_tag === :string && return Completion[], 0:-1, false
- if inc_tag === :other
- frange, ex, wordrange, method_name_end = identify_possible_method_completion(partial, pos)
- if last(frange) != -1 && all(isspace, @view partial[wordrange]) # no last argument to complete
- if ex.head === :call
- return complete_methods(ex, context_module, shift), first(frange):method_name_end, false
- elseif is_broadcasting_expr(ex)
- return complete_methods(ex, context_module, shift), first(frange):(method_name_end - 1), false
- end
+ # Don't fall back to symbol completion inside strings or comments.
+ inside_cmdstr = find_parent(cur, K"cmdstring") !== nothing
+ (kind(cur) in KSet"String Comment ErrorEofMultiComment" || inside_cmdstr) &&
+ return Completion[], 1:0, false
+
+ if (n = find_prefix_call(cur_not_ws)) !== nothing
+ func = first(children_nt(n))
+ e = Expr(n)
+ # Remove arguments past the first parse error (allows unclosed parens)
+ if is_broadcasting_expr(e)
+ i = findfirst(x -> x isa Expr && x.head == :error, e.args[2].args)
+ i !== nothing && deleteat!(e.args[2].args, i:lastindex(e.args[2].args))
+ else
+ i = findfirst(x -> x isa Expr && x.head == :error, e.args)
+ i !== nothing && deleteat!(e.args, i:lastindex(e.args))
end
- elseif inc_tag === :comment
- return Completion[], 0:-1, false
- end
- # Check whether we can complete a keyword argument in a function call
- kwarg_completion, wordrange = complete_keyword_argument(partial, pos, context_module; shift)
- isempty(wordrange) || return kwarg_completion, wordrange, !isempty(kwarg_completion)
+ # Method completion:
+ # foo( TAB => list of method signatures for foo
+ # foo(x, TAB => list of methods signatures for foo with x as first argument
+ if kind(cur_not_ws) in KSet"( , ;"
+ # Don't provide method completions unless the cursor is after: '(' ',' ';'
+ return complete_methods(e, context_module, shift), char_range(func), false
+
+ # Keyword argument completion:
+ # foo(ar TAB => keyword arguments like `arg1=`
+ elseif kind(cur) == K"Identifier"
+ r = char_range(cur)
+ s = string[intersect(r, 1:pos)]
+ # Return without adding more suggestions if kwargs only
+ complete_keyword_argument!(suggestions, e, s, context_module; shift) &&
+ return sort_suggestions(), r, true
+ end
+ end
- startpos = nextind(string, something(findprev(in(non_identifier_chars), string, pos), 0))
- # strip preceding ! operator
- if (m = match(r"\G\!+", partial, startpos)) isa RegexMatch
- startpos += length(m.match)
+ # Symbol completion
+ # TODO: Should completions replace the identifier at the cursor?
+ if cur.parent !== nothing && kind(cur.parent) == K"var"
+ # Replace the entire var"foo", but search using only "foo".
+ r = intersect(char_range(cur.parent), 1:pos)
+ r2 = char_range(children_nt(cur.parent)[1])
+ s = string[intersect(r2, 1:pos)]
+ elseif kind(cur) in KSet"Identifier @"
+ r = intersect(char_range(cur), 1:pos)
+ s = string[r]
+ elseif kind(cur) == K"MacroName"
+ # Include the `@`
+ r = intersect(prevind(string, cur.position):char_last(cur), 1:pos)
+ s = string[r]
+ else
+ r = nextind(string, pos):pos
+ s = ""
end
- separatorpos = something(findprev(isequal('.'), string, pos), 0)
- namepos = max(startpos, separatorpos+1)
- name = string[namepos:pos]
- import_mode = get_import_mode(string)
- if import_mode === :using_module || import_mode === :import_module
+ complete_modules_only = false
+ prefix = node_prefix(cur, context_module)
+ comp_keywords = prefix === nothing
+
+ # Complete loadable module names:
+ # import Mod TAB
+ # import Mod1, Mod2 TAB
+ # using Mod TAB
+ if (n = find_parent(cur, K"importpath")) !== nothing
# Given input lines like `using Foo|`, `import Foo, Bar|` and `using Foo.Bar, Baz, |`:
# Let's look only for packages and modules we can reach from here
+ if prefix == nothing
+ complete_loading_candidates!(suggestions, s)
+ return sort_suggestions(), r, true
+ end
- # If there's no dot, we're in toplevel, so we should
- # also search for packages
- s = string[startpos:pos]
- if separatorpos <= startpos
- for dir in Base.load_path()
- if basename(dir) in Base.project_names && isfile(dir)
- complete_loading_candidates!(suggestions, s, dir)
- end
- isdir(dir) || continue
- for entry in _readdirx(dir)
- pname = entry.name
- if pname[1] != '.' && pname != "METADATA" &&
- pname != "REQUIRE" && startswith(pname, s)
- # Valid file paths are
- # .jl
- #