Unnamed repository; edit this file 'description' to name the repository.
Diffstat (limited to 'crates/hir-expand/src/db.rs')
-rw-r--r-- crates/hir-expand/src/db.rs | 532
1 file changed, 268 insertions(+), 264 deletions(-)
diff --git a/crates/hir-expand/src/db.rs b/crates/hir-expand/src/db.rs
index 6f69ee15ac..ec68f2f96e 100644
--- a/crates/hir-expand/src/db.rs
+++ b/crates/hir-expand/src/db.rs
@@ -3,21 +3,19 @@
use base_db::{salsa, CrateId, FileId, SourceDatabase};
use either::Either;
use limit::Limit;
-use mbe::{syntax_node_to_token_tree, ValueResult};
+use mbe::syntax_node_to_token_tree;
use rustc_hash::FxHashSet;
-use span::{AstIdMap, SyntaxContextData, SyntaxContextId};
-use syntax::{
- ast::{self, HasAttrs},
- AstNode, Parse, SyntaxError, SyntaxNode, SyntaxToken, T,
-};
+use span::{AstIdMap, Span, SyntaxContextData, SyntaxContextId};
+use syntax::{ast, AstNode, Parse, SyntaxElement, SyntaxError, SyntaxNode, SyntaxToken, T};
use triomphe::Arc;
use crate::{
- attrs::collect_attrs,
+ attrs::{collect_attrs, AttrId},
builtin_attr_macro::pseudo_derive_attr_expansion,
builtin_fn_macro::EagerExpander,
+ cfg_process,
declarative::DeclarativeMacroExpander,
- fixup::{self, reverse_fixups, SyntaxFixupUndoInfo},
+ fixup::{self, SyntaxFixupUndoInfo},
hygiene::{span_with_call_site_ctxt, span_with_def_site_ctxt, span_with_mixed_site_ctxt},
proc_macro::ProcMacros,
span_map::{RealSpanMap, SpanMap, SpanMapRef},
@@ -100,10 +98,7 @@ pub trait ExpandDatabase: SourceDatabase {
/// Lowers syntactic macro call to a token tree representation. That's a firewall
/// query, only typing in the macro call itself changes the returned
/// subtree.
- fn macro_arg(
- &self,
- id: MacroCallId,
- ) -> ValueResult<(Arc<tt::Subtree>, SyntaxFixupUndoInfo), Arc<Box<[SyntaxError]>>>;
+ fn macro_arg(&self, id: MacroCallId) -> (Arc<tt::Subtree>, SyntaxFixupUndoInfo, Span);
/// Fetches the expander for this macro.
#[salsa::transparent]
#[salsa::invoke(TokenExpander::macro_expander)]
@@ -120,6 +115,12 @@ pub trait ExpandDatabase: SourceDatabase {
/// non-determinism breaks salsa in a very, very, very bad way.
/// @edwin0cheng heroically debugged this once! See #4315 for details
fn expand_proc_macro(&self, call: MacroCallId) -> ExpandResult<Arc<tt::Subtree>>;
+ /// Retrieves the span to be used for a proc-macro expansions spans.
+ /// This is a firewall query as it requires parsing the file, which we don't want proc-macros to
/// directly depend on as that would cause too frequent invalidations, mainly because of the
+ /// parse queries being LRU cached. If they weren't the invalidations would only happen if the
+ /// user wrote in the file that defines the proc-macro.
+ fn proc_macro_span(&self, fun: AstId<ast::Fn>) -> Span;
/// Firewall query that returns the errors from the `parse_macro_expansion` query.
fn parse_macro_expansion_error(
&self,
@@ -139,30 +140,50 @@ pub fn expand_speculative(
) -> Option<(SyntaxNode, SyntaxToken)> {
let loc = db.lookup_intern_macro_call(actual_macro_call);
+ // FIXME: This BOGUS here is dangerous once the proc-macro server can call back into the database!
let span_map = RealSpanMap::absolute(FileId::BOGUS);
let span_map = SpanMapRef::RealSpanMap(&span_map);
+ let (_, _, span) = db.macro_arg(actual_macro_call);
+
// Build the subtree and token mapping for the speculative args
let (mut tt, undo_info) = match loc.kind {
MacroCallKind::FnLike { .. } => (
- mbe::syntax_node_to_token_tree(speculative_args, span_map, loc.call_site),
+ mbe::syntax_node_to_token_tree(speculative_args, span_map, span),
+ SyntaxFixupUndoInfo::NONE,
+ ),
+ MacroCallKind::Attr { .. } if loc.def.is_attribute_derive() => (
+ mbe::syntax_node_to_token_tree(speculative_args, span_map, span),
SyntaxFixupUndoInfo::NONE,
),
- MacroCallKind::Derive { .. } | MacroCallKind::Attr { .. } => {
- let censor = censor_for_macro_input(&loc, speculative_args);
- let mut fixups = fixup::fixup_syntax(span_map, speculative_args, loc.call_site);
+ MacroCallKind::Derive { derive_attr_index: index, .. }
+ | MacroCallKind::Attr { invoc_attr_index: index, .. } => {
+ let censor = if let MacroCallKind::Derive { .. } = loc.kind {
+ censor_derive_input(index, &ast::Adt::cast(speculative_args.clone())?)
+ } else {
+ attr_source(index, &ast::Item::cast(speculative_args.clone())?)
+ .into_iter()
+ .map(|it| it.syntax().clone().into())
+ .collect()
+ };
+
+ let censor_cfg =
+ cfg_process::process_cfg_attrs(speculative_args, &loc, db).unwrap_or_default();
+ let mut fixups = fixup::fixup_syntax(span_map, speculative_args, span);
fixups.append.retain(|it, _| match it {
- syntax::NodeOrToken::Node(it) => !censor.contains(it),
syntax::NodeOrToken::Token(_) => true,
+ it => !censor.contains(it) && !censor_cfg.contains(it),
});
fixups.remove.extend(censor);
+ fixups.remove.extend(censor_cfg);
+
(
mbe::syntax_node_to_token_tree_modified(
speculative_args,
span_map,
fixups.append,
fixups.remove,
- loc.call_site,
+ span,
),
fixups.undo_info,
)
@@ -184,9 +205,8 @@ pub fn expand_speculative(
}?;
match attr.token_tree() {
Some(token_tree) => {
- let mut tree =
- syntax_node_to_token_tree(token_tree.syntax(), span_map, loc.call_site);
- tree.delimiter = tt::Delimiter::invisible_spanned(loc.call_site);
+ let mut tree = syntax_node_to_token_tree(token_tree.syntax(), span_map, span);
+ tree.delimiter = tt::Delimiter::invisible_spanned(span);
Some(tree)
}
@@ -199,36 +219,36 @@ pub fn expand_speculative(
// Do the actual expansion, we need to directly expand the proc macro due to the attribute args
// Otherwise the expand query will fetch the non speculative attribute args and pass those instead.
let mut speculative_expansion = match loc.def.kind {
- MacroDefKind::ProcMacro(expander, ..) => {
- tt.delimiter = tt::Delimiter::invisible_spanned(loc.call_site);
+ MacroDefKind::ProcMacro(expander, _, ast) => {
+ let span = db.proc_macro_span(ast);
+ tt.delimiter = tt::Delimiter::invisible_spanned(span);
expander.expand(
db,
loc.def.krate,
loc.krate,
&tt,
attr_arg.as_ref(),
- span_with_def_site_ctxt(db, loc.def.span, actual_macro_call),
- span_with_call_site_ctxt(db, loc.def.span, actual_macro_call),
- span_with_mixed_site_ctxt(db, loc.def.span, actual_macro_call),
+ span_with_def_site_ctxt(db, span, actual_macro_call),
+ span_with_call_site_ctxt(db, span, actual_macro_call),
+ span_with_mixed_site_ctxt(db, span, actual_macro_call),
)
}
MacroDefKind::BuiltInAttr(BuiltinAttrExpander::Derive, _) => {
- pseudo_derive_attr_expansion(&tt, attr_arg.as_ref()?, loc.call_site)
+ pseudo_derive_attr_expansion(&tt, attr_arg.as_ref()?, span)
+ }
+ MacroDefKind::Declarative(it) => {
+ db.decl_macro_expander(loc.krate, it).expand_unhygienic(db, tt, loc.def.krate, span)
+ }
+ MacroDefKind::BuiltIn(it, _) => {
+ it.expand(db, actual_macro_call, &tt, span).map_err(Into::into)
}
- MacroDefKind::Declarative(it) => db.decl_macro_expander(loc.krate, it).expand_unhygienic(
- db,
- tt,
- loc.def.krate,
- loc.call_site,
- ),
- MacroDefKind::BuiltIn(it, _) => it.expand(db, actual_macro_call, &tt).map_err(Into::into),
MacroDefKind::BuiltInDerive(it, ..) => {
- it.expand(db, actual_macro_call, &tt).map_err(Into::into)
+ it.expand(db, actual_macro_call, &tt, span).map_err(Into::into)
}
MacroDefKind::BuiltInEager(it, _) => {
- it.expand(db, actual_macro_call, &tt).map_err(Into::into)
+ it.expand(db, actual_macro_call, &tt, span).map_err(Into::into)
}
- MacroDefKind::BuiltInAttr(it, _) => it.expand(db, actual_macro_call, &tt),
+ MacroDefKind::BuiltInAttr(it, _) => it.expand(db, actual_macro_call, &tt, span),
};
let expand_to = loc.expand_to();
@@ -319,181 +339,161 @@ pub(crate) fn parse_with_map(
}
}
-// FIXME: for derive attributes, this will return separate copies of the same structures!
+// FIXME: for derive attributes, this will return separate copies of the same structures! Though
+// they may differ in spans due to differing call sites...
fn macro_arg(
db: &dyn ExpandDatabase,
id: MacroCallId,
- // FIXME: consider the following by putting fixup info into eager call info args
- // ) -> ValueResult<Arc<(tt::Subtree, SyntaxFixupUndoInfo)>, Arc<Box<[SyntaxError]>>> {
-) -> ValueResult<(Arc<tt::Subtree>, SyntaxFixupUndoInfo), Arc<Box<[SyntaxError]>>> {
+) -> (Arc<tt::Subtree>, SyntaxFixupUndoInfo, Span) {
let loc = db.lookup_intern_macro_call(id);
- if let Some(EagerCallInfo { arg, .. }) = matches!(loc.def.kind, MacroDefKind::BuiltInEager(..))
- .then(|| loc.eager.as_deref())
- .flatten()
+
+ if let MacroCallLoc {
+ def: MacroDefId { kind: MacroDefKind::BuiltInEager(..), .. },
+ kind: MacroCallKind::FnLike { eager: Some(eager), .. },
+ ..
+ } = &loc
{
- ValueResult::ok((arg.clone(), SyntaxFixupUndoInfo::NONE))
- } else {
- let (parse, map) = parse_with_map(db, loc.kind.file_id());
- let root = parse.syntax_node();
-
- let syntax = match loc.kind {
- MacroCallKind::FnLike { ast_id, .. } => {
- let dummy_tt = |kind| {
- (
- Arc::new(tt::Subtree {
- delimiter: tt::Delimiter {
- open: loc.call_site,
- close: loc.call_site,
- kind,
- },
- token_trees: Box::default(),
- }),
- SyntaxFixupUndoInfo::default(),
- )
- };
+ return (eager.arg.clone(), SyntaxFixupUndoInfo::NONE, eager.span);
+ }
- let node = &ast_id.to_ptr(db).to_node(&root);
- let offset = node.syntax().text_range().start();
- let Some(tt) = node.token_tree() else {
- return ValueResult::new(
- dummy_tt(tt::DelimiterKind::Invisible),
- Arc::new(Box::new([SyntaxError::new_at_offset(
- "missing token tree".to_owned(),
- offset,
- )])),
- );
- };
- let first = tt.left_delimiter_token().map(|it| it.kind()).unwrap_or(T!['(']);
- let last = tt.right_delimiter_token().map(|it| it.kind()).unwrap_or(T![.]);
-
- let mismatched_delimiters = !matches!(
- (first, last),
- (T!['('], T![')']) | (T!['['], T![']']) | (T!['{'], T!['}'])
- );
- if mismatched_delimiters {
- // Don't expand malformed (unbalanced) macro invocations. This is
- // less than ideal, but trying to expand unbalanced macro calls
- // sometimes produces pathological, deeply nested code which breaks
- // all kinds of things.
- //
- // So instead, we'll return an empty subtree here
- cov_mark::hit!(issue9358_bad_macro_stack_overflow);
-
- let kind = match first {
- _ if loc.def.is_proc_macro() => tt::DelimiterKind::Invisible,
- T!['('] => tt::DelimiterKind::Parenthesis,
- T!['['] => tt::DelimiterKind::Bracket,
- T!['{'] => tt::DelimiterKind::Brace,
- _ => tt::DelimiterKind::Invisible,
- };
- return ValueResult::new(
- dummy_tt(kind),
- Arc::new(Box::new([SyntaxError::new_at_offset(
- "mismatched delimiters".to_owned(),
- offset,
- )])),
- );
- }
- tt.syntax().clone()
- }
- MacroCallKind::Derive { ast_id, .. } => {
- ast_id.to_ptr(db).to_node(&root).syntax().clone()
- }
- MacroCallKind::Attr { ast_id, .. } => ast_id.to_ptr(db).to_node(&root).syntax().clone(),
- };
- let (mut tt, undo_info) = match loc.kind {
- MacroCallKind::FnLike { .. } => (
- mbe::syntax_node_to_token_tree(&syntax, map.as_ref(), loc.call_site),
- SyntaxFixupUndoInfo::NONE,
- ),
- MacroCallKind::Derive { .. } | MacroCallKind::Attr { .. } => {
- let censor = censor_for_macro_input(&loc, &syntax);
- let mut fixups = fixup::fixup_syntax(map.as_ref(), &syntax, loc.call_site);
- fixups.append.retain(|it, _| match it {
- syntax::NodeOrToken::Node(it) => !censor.contains(it),
- syntax::NodeOrToken::Token(_) => true,
- });
- fixups.remove.extend(censor);
- {
- let mut tt = mbe::syntax_node_to_token_tree_modified(
- &syntax,
- map.as_ref(),
- fixups.append.clone(),
- fixups.remove.clone(),
- loc.call_site,
- );
- reverse_fixups(&mut tt, &fixups.undo_info);
- }
+ let (parse, map) = parse_with_map(db, loc.kind.file_id());
+ let root = parse.syntax_node();
+
+ let (censor, item_node, span) = match loc.kind {
+ MacroCallKind::FnLike { ast_id, .. } => {
+ let node = &ast_id.to_ptr(db).to_node(&root);
+ let path_range = node
+ .path()
+ .map_or_else(|| node.syntax().text_range(), |path| path.syntax().text_range());
+ let span = map.span_for_range(path_range);
+
+ let dummy_tt = |kind| {
(
- mbe::syntax_node_to_token_tree_modified(
- &syntax,
- map,
- fixups.append,
- fixups.remove,
- loc.call_site,
- ),
- fixups.undo_info,
+ Arc::new(tt::Subtree {
+ delimiter: tt::Delimiter { open: span, close: span, kind },
+ token_trees: Box::default(),
+ }),
+ SyntaxFixupUndoInfo::default(),
+ span,
)
- }
- };
+ };
- if loc.def.is_proc_macro() {
- // proc macros expect their inputs without parentheses, MBEs expect it with them included
- tt.delimiter.kind = tt::DelimiterKind::Invisible;
- }
+ let Some(tt) = node.token_tree() else {
+ return dummy_tt(tt::DelimiterKind::Invisible);
+ };
+ let first = tt.left_delimiter_token().map(|it| it.kind()).unwrap_or(T!['(']);
+ let last = tt.right_delimiter_token().map(|it| it.kind()).unwrap_or(T![.]);
+
+ let mismatched_delimiters = !matches!(
+ (first, last),
+ (T!['('], T![')']) | (T!['['], T![']']) | (T!['{'], T!['}'])
+ );
+ if mismatched_delimiters {
+ // Don't expand malformed (unbalanced) macro invocations. This is
+ // less than ideal, but trying to expand unbalanced macro calls
+ // sometimes produces pathological, deeply nested code which breaks
+ // all kinds of things.
+ //
+ // So instead, we'll return an empty subtree here
+ cov_mark::hit!(issue9358_bad_macro_stack_overflow);
+
+ let kind = match first {
+ _ if loc.def.is_proc_macro() => tt::DelimiterKind::Invisible,
+ T!['('] => tt::DelimiterKind::Parenthesis,
+ T!['['] => tt::DelimiterKind::Bracket,
+ T!['{'] => tt::DelimiterKind::Brace,
+ _ => tt::DelimiterKind::Invisible,
+ };
+ return dummy_tt(kind);
+ }
- if matches!(loc.def.kind, MacroDefKind::BuiltInEager(..)) {
- match parse.errors() {
- errors if errors.is_empty() => ValueResult::ok((Arc::new(tt), undo_info)),
- errors => ValueResult::new(
- (Arc::new(tt), undo_info),
- // Box::<[_]>::from(res.errors()), not stable yet
- Arc::new(errors.to_vec().into_boxed_slice()),
- ),
+ let mut tt = mbe::syntax_node_to_token_tree(tt.syntax(), map.as_ref(), span);
+ if loc.def.is_proc_macro() {
+ // proc macros expect their inputs without parentheses, MBEs expect it with them included
+ tt.delimiter.kind = tt::DelimiterKind::Invisible;
}
- } else {
- ValueResult::ok((Arc::new(tt), undo_info))
+ return (Arc::new(tt), SyntaxFixupUndoInfo::NONE, span);
+ }
+ MacroCallKind::Derive { ast_id, derive_attr_index, .. } => {
+ let node = ast_id.to_ptr(db).to_node(&root);
+ let censor_derive_input = censor_derive_input(derive_attr_index, &node);
+ let item_node = node.into();
+ let attr_source = attr_source(derive_attr_index, &item_node);
+ // FIXME: This is wrong, this should point to the path of the derive attribute
+ let span =
+ map.span_for_range(attr_source.as_ref().and_then(|it| it.path()).map_or_else(
+ || item_node.syntax().text_range(),
+ |it| it.syntax().text_range(),
+ ));
+ (censor_derive_input, item_node, span)
+ }
+ MacroCallKind::Attr { ast_id, invoc_attr_index, .. } => {
+ let node = ast_id.to_ptr(db).to_node(&root);
+ let attr_source = attr_source(invoc_attr_index, &node);
+ let span = map.span_for_range(
+ attr_source
+ .as_ref()
+ .and_then(|it| it.path())
+ .map_or_else(|| node.syntax().text_range(), |it| it.syntax().text_range()),
+ );
+ (attr_source.into_iter().map(|it| it.syntax().clone().into()).collect(), node, span)
}
+ };
+
+ let (mut tt, undo_info) = {
+ let syntax = item_node.syntax();
+ let censor_cfg = cfg_process::process_cfg_attrs(syntax, &loc, db).unwrap_or_default();
+ let mut fixups = fixup::fixup_syntax(map.as_ref(), syntax, span);
+ fixups.append.retain(|it, _| match it {
+ syntax::NodeOrToken::Token(_) => true,
+ it => !censor.contains(it) && !censor_cfg.contains(it),
+ });
+ fixups.remove.extend(censor);
+ fixups.remove.extend(censor_cfg);
+
+ (
+ mbe::syntax_node_to_token_tree_modified(
+ syntax,
+ map,
+ fixups.append,
+ fixups.remove,
+ span,
+ ),
+ fixups.undo_info,
+ )
+ };
+
+ if loc.def.is_proc_macro() {
+ // proc macros expect their inputs without parentheses, MBEs expect it with them included
+ tt.delimiter.kind = tt::DelimiterKind::Invisible;
}
+
+ (Arc::new(tt), undo_info, span)
}
// FIXME: Censoring info should be calculated by the caller! Namely by name resolution
-/// Certain macro calls expect some nodes in the input to be preprocessed away, namely:
-/// - derives expect all `#[derive(..)]` invocations up to the currently invoked one to be stripped
-/// - attributes expect the invoking attribute to be stripped
-fn censor_for_macro_input(loc: &MacroCallLoc, node: &SyntaxNode) -> FxHashSet<SyntaxNode> {
+/// Derives expect all `#[derive(..)]` invocations up to (and including) the currently invoked one to be stripped
+fn censor_derive_input(derive_attr_index: AttrId, node: &ast::Adt) -> FxHashSet<SyntaxElement> {
// FIXME: handle `cfg_attr`
- (|| {
- let censor = match loc.kind {
- MacroCallKind::FnLike { .. } => return None,
- MacroCallKind::Derive { derive_attr_index, .. } => {
- cov_mark::hit!(derive_censoring);
- ast::Item::cast(node.clone())?
- .attrs()
- .take(derive_attr_index.ast_index() + 1)
- // FIXME, this resolution should not be done syntactically
- // derive is a proper macro now, no longer builtin
- // But we do not have resolution at this stage, this means
- // we need to know about all macro calls for the given ast item here
- // so we require some kind of mapping...
- .filter(|attr| attr.simple_name().as_deref() == Some("derive"))
- .map(|it| it.syntax().clone())
- .collect()
- }
- MacroCallKind::Attr { .. } if loc.def.is_attribute_derive() => return None,
- MacroCallKind::Attr { invoc_attr_index, .. } => {
- cov_mark::hit!(attribute_macro_attr_censoring);
- collect_attrs(&ast::Item::cast(node.clone())?)
- .nth(invoc_attr_index.ast_index())
- .and_then(|x| Either::left(x.1))
- .map(|attr| attr.syntax().clone())
- .into_iter()
- .collect()
- }
- };
- Some(censor)
- })()
- .unwrap_or_default()
+ cov_mark::hit!(derive_censoring);
+ collect_attrs(node)
+ .take(derive_attr_index.ast_index() + 1)
+ .filter_map(|(_, attr)| Either::left(attr))
+ // FIXME, this resolution should not be done syntactically
+ // derive is a proper macro now, no longer builtin
+ // But we do not have resolution at this stage, this means
+ // we need to know about all macro calls for the given ast item here
+ // so we require some kind of mapping...
+ .filter(|attr| attr.simple_name().as_deref() == Some("derive"))
+ .map(|it| it.syntax().clone().into())
+ .collect()
+}
+
+/// Attributes expect the invoking attribute to be stripped
+fn attr_source(invoc_attr_index: AttrId, node: &ast::Item) -> Option<ast::Attr> {
+ // FIXME: handle `cfg_attr`
+ cov_mark::hit!(attribute_macro_attr_censoring);
+ collect_attrs(node).nth(invoc_attr_index.ast_index()).and_then(|(_, attr)| Either::left(attr))
}
impl TokenExpander {
@@ -523,74 +523,64 @@ fn macro_expand(
) -> ExpandResult<CowArc<tt::Subtree>> {
let _p = tracing::span!(tracing::Level::INFO, "macro_expand").entered();
- let ExpandResult { value: tt, mut err } = match loc.def.kind {
+ let (ExpandResult { value: tt, err }, span) = match loc.def.kind {
MacroDefKind::ProcMacro(..) => return db.expand_proc_macro(macro_call_id).map(CowArc::Arc),
_ => {
- let ValueResult { value: (macro_arg, undo_info), err } = db.macro_arg(macro_call_id);
- let format_parse_err = |err: Arc<Box<[SyntaxError]>>| {
- let mut buf = String::new();
- for err in &**err {
- use std::fmt::Write;
- _ = write!(buf, "{}, ", err);
- }
- buf.pop();
- buf.pop();
- ExpandError::other(buf)
- };
+ let (macro_arg, undo_info, span) = db.macro_arg(macro_call_id);
let arg = &*macro_arg;
- let res = match loc.def.kind {
- MacroDefKind::Declarative(id) => {
- db.decl_macro_expander(loc.def.krate, id).expand(db, arg.clone(), macro_call_id)
- }
- MacroDefKind::BuiltIn(it, _) => {
- it.expand(db, macro_call_id, arg).map_err(Into::into)
- }
- // This might look a bit odd, but we do not expand the inputs to eager macros here.
- // Eager macros inputs are expanded, well, eagerly when we collect the macro calls.
- // That kind of expansion uses the ast id map of an eager macros input though which goes through
- // the HirFileId machinery. As eager macro inputs are assigned a macro file id that query
- // will end up going through here again, whereas we want to just want to inspect the raw input.
- // As such we just return the input subtree here.
- MacroDefKind::BuiltInEager(..) if loc.eager.is_none() => {
- return ExpandResult {
- value: CowArc::Arc(macro_arg.clone()),
- err: err.map(format_parse_err),
- };
- }
- MacroDefKind::BuiltInDerive(it, _) => {
- it.expand(db, macro_call_id, arg).map_err(Into::into)
- }
- MacroDefKind::BuiltInEager(it, _) => {
- it.expand(db, macro_call_id, arg).map_err(Into::into)
- }
- MacroDefKind::BuiltInAttr(it, _) => {
- let mut res = it.expand(db, macro_call_id, arg);
- fixup::reverse_fixups(&mut res.value, &undo_info);
- res
- }
- _ => unreachable!(),
- };
- ExpandResult {
- value: res.value,
- // if the arg had parse errors, show them instead of the expansion errors
- err: err.map(format_parse_err).or(res.err),
- }
+ let res =
+ match loc.def.kind {
+ MacroDefKind::Declarative(id) => db
+ .decl_macro_expander(loc.def.krate, id)
+ .expand(db, arg.clone(), macro_call_id, span),
+ MacroDefKind::BuiltIn(it, _) => {
+ it.expand(db, macro_call_id, arg, span).map_err(Into::into)
+ }
+ MacroDefKind::BuiltInDerive(it, _) => {
+ it.expand(db, macro_call_id, arg, span).map_err(Into::into)
+ }
+ MacroDefKind::BuiltInEager(it, _) => {
+ // This might look a bit odd, but we do not expand the inputs to eager macros here.
+ // Eager macros inputs are expanded, well, eagerly when we collect the macro calls.
+ // That kind of expansion uses the ast id map of an eager macros input though which goes through
+ // the HirFileId machinery. As eager macro inputs are assigned a macro file id that query
+ // will end up going through here again, whereas we just want to inspect the raw input.
+ // As such we just return the input subtree here.
+ let eager = match &loc.kind {
+ MacroCallKind::FnLike { eager: None, .. } => {
+ return ExpandResult::ok(CowArc::Arc(macro_arg.clone()));
+ }
+ MacroCallKind::FnLike { eager: Some(eager), .. } => Some(&**eager),
+ _ => None,
+ };
+
+ let mut res = it.expand(db, macro_call_id, arg, span).map_err(Into::into);
+
+ if let Some(EagerCallInfo { error, .. }) = eager {
+ // FIXME: We should report both errors!
+ res.err = error.clone().or(res.err);
+ }
+ res
+ }
+ MacroDefKind::BuiltInAttr(it, _) => {
+ let mut res = it.expand(db, macro_call_id, arg, span);
+ fixup::reverse_fixups(&mut res.value, &undo_info);
+ res
+ }
+ _ => unreachable!(),
+ };
+ (ExpandResult { value: res.value, err: res.err }, span)
}
};
- if let Some(EagerCallInfo { error, .. }) = loc.eager.as_deref() {
- // FIXME: We should report both errors!
- err = error.clone().or(err);
- }
-
// Skip checking token tree limit for include! macro call
if !loc.def.is_include() {
// Set a hard limit for the expanded tt
if let Err(value) = check_tt_count(&tt) {
return value.map(|()| {
CowArc::Owned(tt::Subtree {
- delimiter: tt::Delimiter::invisible_spanned(loc.call_site),
+ delimiter: tt::Delimiter::invisible_spanned(span),
token_trees: Box::new([]),
})
});
@@ -600,12 +590,23 @@ fn macro_expand(
ExpandResult { value: CowArc::Owned(tt), err }
}
+fn proc_macro_span(db: &dyn ExpandDatabase, ast: AstId<ast::Fn>) -> Span {
+ let root = db.parse_or_expand(ast.file_id);
+ let ast_id_map = &db.ast_id_map(ast.file_id);
+ let span_map = &db.span_map(ast.file_id);
+
+ let node = ast_id_map.get(ast.value).to_node(&root);
+ let range = ast::HasName::name(&node)
+ .map_or_else(|| node.syntax().text_range(), |name| name.syntax().text_range());
+ span_map.span_for_range(range)
+}
+
fn expand_proc_macro(db: &dyn ExpandDatabase, id: MacroCallId) -> ExpandResult<Arc<tt::Subtree>> {
let loc = db.lookup_intern_macro_call(id);
- let (macro_arg, undo_info) = db.macro_arg(id).value;
+ let (macro_arg, undo_info, span) = db.macro_arg(id);
- let expander = match loc.def.kind {
- MacroDefKind::ProcMacro(expander, ..) => expander,
+ let (expander, ast) = match loc.def.kind {
+ MacroDefKind::ProcMacro(expander, _, ast) => (expander, ast),
_ => unreachable!(),
};
@@ -614,22 +615,25 @@ fn expand_proc_macro(db: &dyn ExpandDatabase, id: MacroCallId) -> ExpandResult<A
_ => None,
};
- let ExpandResult { value: mut tt, err } = expander.expand(
- db,
- loc.def.krate,
- loc.krate,
- &macro_arg,
- attr_arg,
- span_with_def_site_ctxt(db, loc.def.span, id),
- span_with_call_site_ctxt(db, loc.def.span, id),
- span_with_mixed_site_ctxt(db, loc.def.span, id),
- );
+ let ExpandResult { value: mut tt, err } = {
+ let span = db.proc_macro_span(ast);
+ expander.expand(
+ db,
+ loc.def.krate,
+ loc.krate,
+ &macro_arg,
+ attr_arg,
+ span_with_def_site_ctxt(db, span, id),
+ span_with_call_site_ctxt(db, span, id),
+ span_with_mixed_site_ctxt(db, span, id),
+ )
+ };
// Set a hard limit for the expanded tt
if let Err(value) = check_tt_count(&tt) {
return value.map(|()| {
Arc::new(tt::Subtree {
- delimiter: tt::Delimiter::invisible_spanned(loc.call_site),
+ delimiter: tt::Delimiter::invisible_spanned(span),
token_trees: Box::new([]),
})
});