Vendor things

This commit is contained in:
John Doty 2024-03-08 11:03:01 -08:00
parent 5deceec006
commit 977e3c17e5
19434 changed files with 10682014 additions and 0 deletions

View file

@ -0,0 +1 @@
{"files":{"Cargo.toml":"a1858adb3b56503b35e6af9aa6361715408123a0b778a61d88f0da7e0e61cd6e","src/common/comments.rs":"6ef87d6ed47299f1c5f6f8b60dc987cb74461710c08c4fba6436bef92f3b5414","src/common/mod.rs":"ea477fb9e79c38d2a056acbac63cf2b4820bad1aa67f51bafe9ad651ea2f93e8","src/common/pos.rs":"2768c7480f6a0829a2a79368ed38a7a347a1f67a79a522402f2d3222c9499608","src/common/text_encoding.rs":"c69d4c2e9a561b03b934059eb85615c8be740d40286452fc8e9dfaa111187120","src/common/text_info.rs":"a632f518e3728ab336bb734cab14f8c786cddeda81719b037380d2dce48dcd76","src/common/tokens.rs":"ab145c846258993d084c9d7654d73ea3c6742259072d788ec6dfaadf5e7483fd","src/common/types.rs":"5491053e757f2e665d9fe6b06d1330d3d43803988b7755d0700d834156e0fdc7","src/lib.rs":"ebeb5af913ded0dfd9efe9e3b7df053e8731721a4d86f4539efd60afd9331aef","src/test_helpers.rs":"c6a03c0745e8fe38b82b0be89327bcde5789317197daa545420601d0a063cda7","src/view/custom.rs":"70376e92f9a4195436c6a53537836e34a6193a4afbc6863b54f1794fa362132b","src/view/generated.rs":"04803136c8c9262d7adc7af3dbc48de43152a7936bae3fa1f539dbb96d3ea733","src/view/mod.rs":"ba58cf76d51811f66e6673a499fefea842a973c70ecd148693225026c37c98cc","src/view/test_helpers.rs":"1dd303b22a44d6a7b251bd91dd8a03be4b5b4eb7be5efc0bd601853db904e218","src/view/types.rs":"8f8e451cba71c127449c13f3fdfb79faa236beb424b5a9b8ac38f36980fb26db"},"package":"6a0a2492465344a58a37ae119de59e81fe5a2885f2711c7b5048ef0dfa14ce42"}

View file

@ -0,0 +1,52 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "dprint-swc-ext"
version = "0.12.0"
authors = ["David Sherret <dsherret@gmail.com>"]
description = "Functionality to make swc easier to work with."
license = "MIT"
[package.metadata.docs.rs]
all-features = true
[dependencies.bumpalo]
version = "3.13.0"
[dependencies.num-bigint]
version = "0.4"
[dependencies.rustc-hash]
version = "1.1.0"
[dependencies.swc_atoms]
version = "0.5.9"
[dependencies.swc_common]
version = "0.32.0"
[dependencies.swc_ecma_ast]
version = "0.109.0"
[dependencies.swc_ecma_parser]
version = "0.139.0"
[dependencies.text_lines]
version = "0.6.0"
[dev-dependencies.pretty_assertions]
version = "1.3.0"
[features]
sourcemap = ["swc_common/sourcemap"]
view = []

View file

@ -0,0 +1,199 @@
use super::pos::*;
use super::text_info::*;
use super::tokens::*;
use crate::swc::common::comments::Comment;
use crate::swc::common::comments::SingleThreadedCommentsMapInner;
/// Associates the leading and trailing comment maps produced by parsing
/// with the token container and text info needed to resolve the comments
/// around arbitrary source positions.
pub struct CommentContainer<'a> {
  /// Comments keyed by the position of the token they precede.
  pub leading: &'a SingleThreadedCommentsMapInner,
  /// Comments keyed by the position of the token they follow.
  pub trailing: &'a SingleThreadedCommentsMapInner,
  // used to find the neighbouring tokens around a position
  tokens: &'a TokenContainer<'a>,
  // used to resolve the source text's start/end positions
  text_info: &'a SourceTextInfo,
}
impl<'a> CommentContainer<'a> {
  /// Creates a comment container from the leading/trailing comment maps
  /// produced by parsing along with the parsed tokens and text info.
  pub fn new(
    leading: &'a SingleThreadedCommentsMapInner,
    trailing: &'a SingleThreadedCommentsMapInner,
    tokens: &'a TokenContainer<'a>,
    text_info: &'a SourceTextInfo,
  ) -> Self {
    CommentContainer {
      leading,
      trailing,
      tokens,
      text_info,
    }
  }

  /// Gets an iterator over every comment in the container (all the leading
  /// comment vecs followed by all the trailing comment vecs).
  pub fn all_comments(&'a self) -> CommentsIterator<'a> {
    // one vec reference is pushed per map entry, so this capacity is exact
    let approx_cap = self.leading.len() + self.trailing.len();
    let mut v = Vec::with_capacity(approx_cap);
    v.extend(self.leading.values());
    v.extend(self.trailing.values());
    CommentsIterator::new(v)
  }

  /// Gets the comments appearing before the provided position.
  ///
  /// When `start` is the start of a token, the trailing comments of the
  /// previous token (or of the source start for the first token) are
  /// yielded before the leading comments stored at `start`.
  pub fn leading_comments(&'a self, start: SourcePos) -> CommentsIterator<'a> {
    // end position of the token before `start`; `None` when `start` is
    // not the start position of any token
    let previous_token_hi = self.tokens.get_token_index_at_start(start).map(|index| {
      if index == 0 {
        self.text_info.range().start.as_source_pos()
      } else {
        self.tokens.get_token_at_index(index - 1).unwrap().end()
      }
    });
    let leading = self.get_leading(start);
    if let Some(previous_token_hi) = previous_token_hi {
      let trailing = self.get_trailing(previous_token_hi);
      combine_comment_vecs(trailing, leading)
    } else {
      leading.map(|l| CommentsIterator::new(vec![l])).unwrap_or_default()
    }
  }

  /// Gets the comments appearing after the provided position.
  ///
  /// When `end` is the end of a token, the trailing comments stored at
  /// `end` are yielded before the leading comments of the next token
  /// (or of the source end for the last token).
  pub fn trailing_comments(&'a self, end: SourcePos) -> CommentsIterator<'a> {
    // start position of the token after `end`; `None` when `end` is
    // not the end position of any token
    let next_token_lo = self.tokens.get_token_index_at_end(end).map(|index| {
      self
        .tokens
        .get_token_at_index(index + 1)
        .map(|t| t.start())
        .unwrap_or_else(|| self.text_info.range().end)
    });
    let trailing = self.get_trailing(end);
    if let Some(next_token_lo) = next_token_lo {
      let leading = self.get_leading(next_token_lo);
      combine_comment_vecs(trailing, leading)
    } else {
      trailing.map(|t| CommentsIterator::new(vec![t])).unwrap_or_default()
    }
  }

  /// Looks up the leading comments stored exactly at `start`.
  fn get_leading(&'a self, start: SourcePos) -> Option<&'a Vec<Comment>> {
    self.leading.get(&start.as_byte_pos())
  }

  /// Looks up the trailing comments stored exactly at `end`.
  fn get_trailing(&'a self, end: SourcePos) -> Option<&'a Vec<Comment>> {
    self.trailing.get(&end.as_byte_pos())
  }
}
/// Builds an iterator over both comment vecs, yielding `a`'s comments
/// before `b`'s and skipping whichever side is `None`.
fn combine_comment_vecs<'a>(a: Option<&'a Vec<Comment>>, b: Option<&'a Vec<Comment>>) -> CommentsIterator<'a> {
  let comment_vecs: Vec<&'a Vec<Comment>> = a.into_iter().chain(b).collect();
  CommentsIterator::new(comment_vecs)
}
/// An iterator over borrowed comment vecs that supports both forward
/// and backward iteration.
#[derive(Clone)]
pub struct CommentsIterator<'a> {
  // the underlying collections of comments being iterated
  comment_vecs: Vec<&'a Vec<Comment>>,
  // forward cursor: current vec and position within it
  outer_index: usize,
  inner_index: usize,
  // backward cursor used by `next_back`
  outer_index_back: usize,
  inner_index_back: usize,
}
impl<'a> CommentsIterator<'a> {
  /// Creates an iterator over no comments.
  pub fn empty() -> Self {
    Self::new(Vec::with_capacity(0))
  }

  /// Creates an iterator over the provided comment vecs, yielding
  /// the comments of each vec in order.
  pub fn new(comment_vecs: Vec<&'a Vec<Comment>>) -> Self {
    let outer_index_back = comment_vecs.len();
    CommentsIterator {
      comment_vecs,
      outer_index: 0,
      inner_index: 0,
      outer_index_back,
      inner_index_back: 0,
    }
  }

  /// Resets forward iteration back to the first comment.
  ///
  /// Note: the backward (`next_back`) cursor is left untouched.
  pub fn reset(&mut self) {
    self.outer_index = 0;
    self.inner_index = 0;
  }

  /// Appends the comment vecs of another iterator to this one.
  pub fn extend(&mut self, iterator: CommentsIterator<'a>) {
    self.comment_vecs.extend(iterator.comment_vecs);
    // reset the back iterator
    self.outer_index_back = self.comment_vecs.len();
    self.inner_index_back = 0;
  }

  /// Gets if there are no comments (independent of iteration state).
  pub fn is_empty(&self) -> bool {
    self.comment_vecs.iter().all(|comments| comments.is_empty())
  }

  /// Gets the last comment without advancing iteration.
  pub fn peek_last_comment(&self) -> Option<&'a Comment> {
    // search backwards through the vecs — previously only the final vec was
    // inspected, so a trailing empty vec would incorrectly hide the actual
    // last comment stored in an earlier vec
    self.comment_vecs.iter().rev().find_map(|comments| comments.last())
  }
}
impl<'a> Default for CommentsIterator<'a> {
fn default() -> Self {
Self::empty()
}
}
impl<'a> Iterator for CommentsIterator<'a> {
  type Item = &'a Comment;

  /// Yields the next comment, walking each comment vec in order and
  /// skipping over empty vecs.
  fn next(&mut self) -> Option<&'a Comment> {
    loop {
      if let Some(comments) = self.comment_vecs.get(self.outer_index) {
        if let Some(comment) = comments.get(self.inner_index) {
          self.inner_index += 1;
          return Some(comment);
        } else {
          // finished the current vec — advance to the next one
          self.inner_index = 0;
          self.outer_index += 1;
        }
      } else {
        return None;
      }
    }
  }

  /// Exact count of the comments remaining in forward iteration
  /// (lower and upper bounds are equal).
  fn size_hint(&self) -> (usize, Option<usize>) {
    let mut next_inner_index = self.inner_index;
    let mut count = 0;
    for comment_vec in &self.comment_vecs[self.outer_index..] {
      // only the first (partially consumed) vec subtracts the inner offset
      count += comment_vec.len() - next_inner_index;
      next_inner_index = 0;
    }
    (count, Some(count))
  }
}
impl<'a> DoubleEndedIterator for CommentsIterator<'a> {
  /// Yields comments from the end, walking the comment vecs in reverse.
  ///
  /// NOTE(review): the forward and backward cursors are independent and do
  /// not detect crossing, so mixing `next()` and `next_back()` on the same
  /// iterator may yield an element from both ends — confirm callers only
  /// iterate in one direction.
  fn next_back(&mut self) -> Option<&'a Comment> {
    loop {
      if self.inner_index_back == 0 {
        if self.outer_index_back == 0 {
          return None;
        }
        self.outer_index_back -= 1;
        let len = self.comment_vecs[self.outer_index_back].len();
        if len == 0 {
          // previously `len - 1` would underflow (panicking in debug builds)
          // when a comment vec was empty; skip empty vecs instead, matching
          // the forward iterator's behavior
          continue;
        }
        self.inner_index_back = len - 1;
      } else {
        self.inner_index_back -= 1;
      }
      return self.comment_vecs.get(self.outer_index_back).and_then(|inner| inner.get(self.inner_index_back));
    }
  }
}

View file

@ -0,0 +1,20 @@
mod comments;
mod pos;
mod text_encoding;
mod text_info;
mod tokens;
mod types;
pub use comments::*;
pub use pos::*;
pub use text_info::*;
pub use text_lines;
pub use tokens::*;
pub use types::*;
/// A 0-indexed line and column type.
///
/// Re-export of [`text_lines::LineAndColumnIndex`].
pub type LineAndColumnIndex = text_lines::LineAndColumnIndex;
/// A 1-indexed line and column type which should be used for
/// display purposes only (ex. in diagnostics).
///
/// Re-export of [`text_lines::LineAndColumnDisplay`].
pub type LineAndColumnDisplay = text_lines::LineAndColumnDisplay;

View file

@ -0,0 +1,405 @@
use crate::swc::common::BytePos;
use crate::swc::common::Span;
use crate::swc::parser::token::TokenAndSpan;
use super::comments::*;
use super::text_info::*;
use super::types::*;
/// Swc unfortunately uses `BytePos(0)` as a magic value. This means
/// that we can't have byte positions of nodes line up with the text.
/// To get around this, we have created our own `SourcePos` wrapper
/// that hides the underlying swc byte position so it can't be used
/// incorrectly.
///
/// Positions start at `StartSourcePos::START_SOURCE_POS` (byte pos 1).
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct SourcePos(BytePos);
impl SourcePos {
  /// Creates a source position at the provided index from the source start
  /// (test-only convenience constructor).
  #[cfg(test)]
  pub fn new(index: usize) -> Self {
    Self(StartSourcePos::START_SOURCE_POS.as_byte_pos() + BytePos(index as u32))
  }

  /// Gets the underlying swc byte position.
  pub fn as_byte_pos(&self) -> BytePos {
    self.0
  }

  /// Gets the byte index of this position relative to the provided
  /// start position.
  pub fn as_byte_index(&self, start_pos: StartSourcePos) -> usize {
    *self - start_pos
  }

  /// Do not use this except when receiving an swc byte position
  /// from swc and needing to convert it to a source position.
  /// If you need to create a `SourcePos` then you should get
  /// the text info's start position and add to it in order to
  /// get a new source position.
  pub fn unsafely_from_byte_pos(byte_pos: BytePos) -> Self {
    // debug-builds-only sanity check that the position is not below the start
    #[cfg(debug_assertions)]
    if byte_pos < StartSourcePos::START_SOURCE_POS.as_byte_pos() {
      panic!(concat!(
        "The provided byte position was less than the start byte position. ",
        "Ensure the source file is parsed starting at SourcePos::START_SOURCE_POS."
      ))
    }
    Self(byte_pos)
  }

  /// Gets the index relative to `START_SOURCE_POS` (used for
  /// `Debug`/`Display` output).
  pub(crate) fn as_usize(&self) -> usize {
    (self.as_byte_pos() - StartSourcePos::START_SOURCE_POS.as_byte_pos()).0 as usize
  }
}
impl std::fmt::Debug for SourcePos {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // shown as the index relative to the source start, not the raw byte pos
    let mut tuple = f.debug_tuple("SourcePos");
    tuple.field(&self.as_usize());
    tuple.finish()
  }
}
impl std::fmt::Display for SourcePos {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // displays the index relative to the source start
    write!(f, "{}", self.as_usize())
  }
}
impl std::ops::Add<usize> for SourcePos {
  type Output = SourcePos;

  /// Moves the position forward by `rhs` bytes.
  // note: `rhs as u32` truncates — swc's `BytePos` is u32, so positions
  // are assumed to fit within u32
  fn add(self, rhs: usize) -> Self::Output {
    SourcePos(BytePos(self.0 .0 + rhs as u32))
  }
}

impl std::ops::Sub<usize> for SourcePos {
  type Output = SourcePos;

  /// Moves the position backward by `rhs` bytes.
  // note: underflows below the start position will panic in debug builds
  fn sub(self, rhs: usize) -> Self::Output {
    SourcePos(BytePos(self.0 .0 - rhs as u32))
  }
}

impl std::ops::Sub<SourcePos> for SourcePos {
  type Output = usize;

  /// Gets the byte distance between two positions (`self` must be >= `rhs`).
  fn sub(self, rhs: SourcePos) -> Self::Output {
    (self.0 - rhs.0).0 as usize
  }
}
// A single position behaves as an empty range starting and ending at itself.
impl SourceRanged for SourcePos {
  fn start(&self) -> SourcePos {
    *self
  }
  fn end(&self) -> SourcePos {
    *self
  }
}
/// A special source pos that indicates the source start
/// which functions can use as a parameter type in order
/// to ensure someone doesn't provide the wrong position.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct StartSourcePos(pub(crate) SourcePos);
impl StartSourcePos {
  /// Use this value as the start byte position when parsing.
  pub const START_SOURCE_POS: StartSourcePos = StartSourcePos(SourcePos(BytePos(1)));

  /// Gets the underlying swc byte position.
  pub fn as_byte_pos(&self) -> BytePos {
    self.0.as_byte_pos()
  }

  /// Gets this start position as a plain `SourcePos`.
  pub fn as_source_pos(&self) -> SourcePos {
    self.0
  }

  /// Gets the byte index relative to `START_SOURCE_POS` (used for
  /// `Debug`/`Display` output).
  pub(crate) fn as_usize(&self) -> usize {
    (self.as_byte_pos() - StartSourcePos::START_SOURCE_POS.as_byte_pos()).0 as usize
  }
}
// Only want Into and not From in order to prevent
// people from creating one of these easily.
#[allow(clippy::from_over_into)]
impl Into<SourcePos> for StartSourcePos {
  fn into(self) -> SourcePos {
    self.0
  }
}
impl std::fmt::Debug for StartSourcePos {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // shown as the index relative to the canonical start position
    let mut tuple = f.debug_tuple("StartSourcePos");
    tuple.field(&self.as_usize());
    tuple.finish()
  }
}
impl std::fmt::Display for StartSourcePos {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // displays the index relative to the canonical start position
    write!(f, "{}", self.as_usize())
  }
}
impl std::ops::Add<usize> for StartSourcePos {
type Output = SourcePos;
fn add(self, rhs: usize) -> Self::Output {
SourcePos(BytePos(self.0 .0 .0 + rhs as u32))
}
}
impl std::ops::Sub<StartSourcePos> for SourcePos {
  type Output = usize;

  /// Gets the byte distance from the start position, delegating to
  /// `SourcePos - SourcePos`.
  fn sub(self, rhs: StartSourcePos) -> Self::Output {
    self - rhs.as_source_pos()
  }
}
// Cross comparisons so a `StartSourcePos` and a `SourcePos` can be
// compared directly in either order.
impl std::cmp::PartialEq<SourcePos> for StartSourcePos {
  fn eq(&self, other: &SourcePos) -> bool {
    self.as_source_pos() == *other
  }
}

impl std::cmp::PartialOrd<SourcePos> for StartSourcePos {
  fn partial_cmp(&self, other: &SourcePos) -> Option<std::cmp::Ordering> {
    // `SourcePos` is totally ordered, so this is always `Some`
    Some(self.as_source_pos().cmp(other))
  }
}

impl std::cmp::PartialEq<StartSourcePos> for SourcePos {
  fn eq(&self, other: &StartSourcePos) -> bool {
    other.as_source_pos() == *self
  }
}

impl std::cmp::PartialOrd<StartSourcePos> for SourcePos {
  fn partial_cmp(&self, other: &StartSourcePos) -> Option<std::cmp::Ordering> {
    Some(self.cmp(&other.as_source_pos()))
  }
}
/// A range of source text from a start position up to an exclusive end
/// position. The start may be either a plain `SourcePos` (the default)
/// or the source's `StartSourcePos`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SourceRange<T = SourcePos>
where
  T: Into<SourcePos> + Clone + Copy,
{
  pub start: T,
  pub end: SourcePos,
}
impl<T: Into<SourcePos> + Clone + Copy> SourceRange<T> {
pub fn new(start: T, end: SourcePos) -> Self {
Self { start, end }
}
/// Gets if the source range contains the other source range inclusive.
pub fn contains<U: Into<SourcePos> + Clone + Copy>(&self, other: &SourceRange<U>) -> bool {
let start: SourcePos = self.start.into();
let other_start: SourcePos = other.start.into();
start <= other_start && self.end >= other.end
}
}
impl SourceRange<SourcePos> {
  /// Gets the relative byte range based on the source text's start position.
  pub fn as_byte_range(&self, source_start: StartSourcePos) -> std::ops::Range<usize> {
    (self.start - source_start)..(self.end - source_start)
  }

  /// Do not use this except when receiving an swc span
  /// from swc and needing to convert it to a source position.
  /// Generally, prefer using the `.range()` method.
  pub fn unsafely_from_span(span: Span) -> Self {
    let start = SourcePos::unsafely_from_byte_pos(span.lo);
    let end = SourcePos::unsafely_from_byte_pos(span.hi);
    SourceRange::new(start, end)
  }
}
impl SourceRange<StartSourcePos> {
  /// Gets the relative byte range based on the source text's start position.
  pub fn as_byte_range(&self) -> std::ops::Range<usize> {
    // the range begins at the source start, so the relative start is always 0
    0..(self.end - self.start)
  }
}
// A range trivially knows its own start and end.
impl<T: Into<SourcePos> + Clone + Copy> SourceRanged for SourceRange<T> {
  fn start(&self) -> SourcePos {
    self.start.into()
  }
  fn end(&self) -> SourcePos {
    self.end
  }
}
// Only want Into and not From in order to prevent
// people from creating one of these easily.
#[allow(clippy::from_over_into)]
impl Into<Span> for SourceRange {
  fn into(self) -> Span {
    let lo = self.start.as_byte_pos();
    let hi = self.end.as_byte_pos();
    Span::new(lo, hi, Default::default())
  }
}
// Shared method bodies for the `SourceRanged` and `SourceRangedForSpanned`
// traits below, which both build their helpers on top of `start()`/`end()`.
macro_rules! source_ranged_trait {
  () => {
    /// Gets the source range of this node.
    fn range(&self) -> SourceRange {
      SourceRange {
        start: self.start(),
        end: self.end(),
      }
    }
    /// Gets the width of the range in bytes.
    fn byte_width(&self) -> usize {
      self.end() - self.start()
    }
    /// Gets the 0-indexed line index of the start position.
    fn start_line_fast<'a, P: SourceTextInfoProvider<'a>>(&self, source: P) -> usize {
      source.text_info().line_index(self.start())
    }
    /// Gets the 0-indexed line index of the end position.
    fn end_line_fast<'a, P: SourceTextInfoProvider<'a>>(&self, source: P) -> usize {
      source.text_info().line_index(self.end())
    }
    /// Gets the 0-indexed column (in characters) of the start position.
    fn start_column_fast<'a, P: SourceTextInfoProvider<'a>>(&self, source: P) -> usize {
      self.column_at_pos(source, self.start())
    }
    /// Gets the 0-indexed column (in characters) of the end position.
    fn end_column_fast<'a, P: SourceTextInfoProvider<'a>>(&self, source: P) -> usize {
      self.column_at_pos(source, self.end())
    }
    /// Counts the characters between the start of `pos`'s line and `pos`.
    fn column_at_pos<'a, P: SourceTextInfoProvider<'a>>(&self, source: P, pos: SourcePos) -> usize {
      let text_info = source.text_info();
      let text_bytes = text_info.text_str().as_bytes();
      let pos = pos - text_info.range().start;
      // scan backwards for the previous newline to find the line start
      let mut line_start = 0;
      for i in (0..pos).rev() {
        if text_bytes[i] == b'\n' {
          line_start = i + 1;
          break;
        }
      }
      let text_slice = &text_info.text_str()[line_start..pos];
      text_slice.chars().count()
    }
    /// Gets the number of characters (not bytes) in the range's text.
    fn char_width_fast<'a, P: SourceTextInfoProvider<'a>>(&self, source: P) -> usize {
      self.text_fast(source).chars().count()
    }
    /// Gets the slice of source text covered by this range.
    fn text_fast<'a, P: SourceTextInfoProvider<'a>>(&self, source: P) -> &'a str {
      let text_info = source.text_info();
      let byte_range = self.range().as_byte_range(text_info.range().start);
      &text_info.text_str()[byte_range]
    }
    /// Gets the tokens contained within this range.
    fn tokens_fast<'a>(&self, program: impl RootNode<'a>) -> &'a [TokenAndSpan] {
      let token_container = program.token_container();
      token_container.get_tokens_in_range(self.start(), self.end())
    }
    /// Gets the comments appearing before this range's start.
    fn leading_comments_fast<'a>(&self, program: impl RootNode<'a>) -> CommentsIterator<'a> {
      program.comment_container().leading_comments(self.start())
    }
    /// Gets the comments appearing after this range's end.
    fn trailing_comments_fast<'a>(&self, program: impl RootNode<'a>) -> CommentsIterator<'a> {
      program.comment_container().trailing_comments(self.end())
    }
    /// Gets the token immediately before this range, if any.
    fn previous_token_fast<'a>(&self, program: impl RootNode<'a>) -> Option<&'a TokenAndSpan> {
      program.token_container().get_previous_token(self.start())
    }
    /// Gets the token immediately after this range, if any.
    fn next_token_fast<'a>(&self, program: impl RootNode<'a>) -> Option<&'a TokenAndSpan> {
      program.token_container().get_next_token(self.end())
    }
    /// Gets all the tokens before this range's start.
    ///
    /// Panics when the start position doesn't line up with a token boundary.
    fn previous_tokens_fast<'a>(&self, program: impl RootNode<'a>) -> &'a [TokenAndSpan] {
      let token_container = program.token_container();
      let index = token_container
        .get_token_index_at_start(self.start())
        // fallback
        .or_else(|| token_container.get_token_index_at_end(self.start()))
        .unwrap_or_else(|| panic!("The specified start position ({}) did not have a token index.", self.start()));
      &token_container.tokens[0..index]
    }
    /// Gets all the tokens after this range's end.
    ///
    /// Panics when the end position doesn't line up with a token boundary.
    fn next_tokens_fast<'a>(&self, program: impl RootNode<'a>) -> &'a [TokenAndSpan] {
      let token_container = program.token_container();
      let index = token_container
        .get_token_index_at_end(self.end())
        // fallback
        .or_else(|| token_container.get_token_index_at_start(self.end()))
        .unwrap_or_else(|| panic!("The specified end position ({}) did not have a token index.", self.end()));
      &token_container.tokens[index + 1..]
    }
  };
}
/// Trait implemented by types that know their start and end source
/// positions, providing the shared range/text/token/comment helpers.
pub trait SourceRanged {
  fn start(&self) -> SourcePos;
  fn end(&self) -> SourcePos;
  source_ranged_trait!();
}
// Forwards the implementation through references so `&T` can be used
// anywhere a `SourceRanged` is expected.
impl<'a, S> SourceRanged for &'a S
where
  S: ?Sized + SourceRanged + 'a,
{
  fn start(&self) -> SourcePos {
    <S as SourceRanged>::start(*self)
  }
  fn end(&self) -> SourcePos {
    <S as SourceRanged>::end(*self)
  }
}
/// Adds source position helper methods for swc types that implement
/// `swc_common::Spanned`.
///
/// There were conflicts with implementing `SourceRanged` for `&SourceRanged`
/// with swc's Spanned implementation, so this needed to be a separate trait
/// unfortunately and I couldn't figure out how to combine it with `SourceRanged`
pub trait SourceRangedForSpanned {
  fn start(&self) -> SourcePos;
  fn end(&self) -> SourcePos;
  source_ranged_trait!();
}
// Blanket implementation: anything with an swc span gets the helpers.
impl<T> SourceRangedForSpanned for T
where
  T: swc_common::Spanned,
{
  fn start(&self) -> SourcePos {
    let span = self.span();
    SourcePos::unsafely_from_byte_pos(span.lo)
  }
  fn end(&self) -> SourcePos {
    let span = self.span();
    SourcePos::unsafely_from_byte_pos(span.hi)
  }
}
#[cfg(test)]
mod test {
  use super::*;

  // exercises `SourceRange::contains` at and around its inclusive boundaries
  #[test]
  fn source_range_contains() {
    let start_pos = StartSourcePos::START_SOURCE_POS;
    assert!(SourceRange::new(start_pos, start_pos + 5).contains(&SourceRange::new(start_pos + 1, start_pos + 2)));
    assert!(SourceRange::new(start_pos + 1, start_pos + 5).contains(&SourceRange::new(start_pos + 1, start_pos + 2)));
    assert!(!SourceRange::new(start_pos + 2, start_pos + 5).contains(&SourceRange::new(start_pos + 1, start_pos + 2)));
    assert!(SourceRange::new(start_pos + 1, start_pos + 3).contains(&SourceRange::new(start_pos + 1, start_pos + 2)));
    assert!(SourceRange::new(start_pos + 1, start_pos + 2).contains(&SourceRange::new(start_pos + 1, start_pos + 2)));
    assert!(!SourceRange::new(start_pos + 1, start_pos + 1).contains(&SourceRange::new(start_pos + 1, start_pos + 2)));
  }
}

View file

@ -0,0 +1,27 @@
/// The byte order mark character (U+FEFF).
pub const BOM_CHAR: char = '\u{FEFF}';

/// Strips the byte order mark if it exists from the provided text.
pub fn strip_bom_mut(text: &mut String) {
  if text.starts_with(BOM_CHAR) {
    // remove the BOM's bytes in place, keeping the rest of the text
    text.replace_range(..BOM_CHAR.len_utf8(), "");
  }
}
#[cfg(test)]
mod test {
  use super::*;

  // the BOM is removed when present
  #[test]
  fn strip_bom_mut_with_bom() {
    let mut text = format!("{}text", BOM_CHAR);
    strip_bom_mut(&mut text);
    assert_eq!(text, "text");
  }

  // text without a BOM is left untouched
  #[test]
  fn strip_bom_mut_without_bom() {
    let mut text = "text".to_string();
    strip_bom_mut(&mut text);
    assert_eq!(text, "text");
  }
}

View file

@ -0,0 +1,338 @@
use std::sync::Arc;
use swc_common::input::StringInput;
pub use text_lines::LineAndColumnIndex;
pub use text_lines::TextLines;
use super::pos::*;
use super::text_encoding::strip_bom_mut;
use super::text_encoding::BOM_CHAR;
use super::LineAndColumnDisplay;
/// Stores the source text along with other data such as where all the lines
/// occur in the text.
///
/// Note: This struct is cheap to clone.
#[derive(Clone)]
pub struct SourceTextInfo {
  // keep this struct cheap to clone
  // the position the source text starts at (swc increases this when bundling)
  start_pos: StartSourcePos,
  text: Arc<str>,
  // pre-computed line information derived from `text`
  text_lines: Arc<TextLines>,
}
impl SourceTextInfo {
  /// Creates a new `SourceTextInfo` from the provided source text.
  pub fn new(text: Arc<str>) -> Self {
    Self::new_with_pos(StartSourcePos::START_SOURCE_POS.as_source_pos(), text)
  }

  /// Creates a new `SourceTextInfo` from the provided source start position
  /// and source text.
  ///
  /// Note: When bundling swc will keep increasing the start position for
  /// each source file.
  pub fn new_with_pos(start_pos: SourcePos, text: Arc<str>) -> Self {
    // The BOM should be stripped before it gets passed here
    // because it's a text encoding concern that should be
    // stripped when the file is read.
    // todo(dsherret): re-enable after removing the below
    // assert!(!text.starts_with(BOM_CHAR), "BOM should be stripped before creating a SourceTextInfo.");
    // todo(dsherret): remove this once handled downstream
    let text = if text.starts_with(BOM_CHAR) {
      // stripping requires an owned copy since `Arc<str>` is immutable
      let mut text = text.to_string();
      strip_bom_mut(&mut text);
      text.into()
    } else {
      text
    };
    Self::new_with_indent_width(start_pos, text, 2)
  }

  /// Creates a new `SourceTextInfo` from the provided start position,
  /// source text, and indentation width.
  ///
  /// The indentation width determines the number of columns to use
  /// when going over a tab character. For example, an indent width
  /// of 2 will mean each tab character will represent 2 columns.
  /// The default indentation width used in the other methods is `2`
  /// to match the default indentation used by `deno fmt`.
  pub fn new_with_indent_width(start_pos: SourcePos, text: Arc<str>, indent_width: usize) -> Self {
    Self {
      start_pos: StartSourcePos(start_pos),
      text_lines: Arc::new(TextLines::with_indent_width(&text, indent_width)),
      text,
    }
  }

  /// Creates a new `SourceTextInfo` from the provided source text.
  ///
  /// Generally, prefer using `SourceTextInfo::new` to provide a
  /// string already in an `std::sync::Arc`.
  pub fn from_string(text: String) -> Self {
    Self::new(text.into())
  }

  /// Gets an swc `StringInput` for this text information that can be
  /// used with parsing.
  pub fn as_string_input(&self) -> StringInput {
    let range = self.range();
    StringInput::new(self.text_str(), range.start.as_byte_pos(), range.end.as_byte_pos())
  }

  /// Gets the source text.
  pub fn text(&self) -> Arc<str> {
    self.text.clone()
  }

  /// Gets a reference to the source text.
  pub fn text_str(&self) -> &str {
    &self.text
  }

  /// Gets the range—start and end byte position—of the source text.
  pub fn range(&self) -> SourceRange<StartSourcePos> {
    SourceRange::new(self.start_pos, self.start_pos + self.text.len())
  }

  /// Gets the number of lines in the source text.
  pub fn lines_count(&self) -> usize {
    self.text_lines.lines_count()
  }

  /// Gets the 0-indexed line index at the provided byte position.
  ///
  /// Note that this will panic when providing a byte position outside
  /// the range of the source text.
  pub fn line_index(&self, pos: SourcePos) -> usize {
    self.assert_pos(pos);
    self.text_lines.line_index(self.get_relative_index_from_pos(pos))
  }

  /// Gets the line start byte position of the provided 0-indexed line index.
  ///
  /// Note that this will panic if providing a line index outside the
  /// bounds of the number of lines.
  pub fn line_start(&self, line_index: usize) -> SourcePos {
    self.assert_line_index(line_index);
    self.get_pos_from_relative_index(self.text_lines.line_start(line_index))
  }

  /// Gets the line end byte position of the provided 0-indexed line index.
  ///
  /// Note that this will panic if providing a line index outside the
  /// bounds of the number of lines.
  pub fn line_end(&self, line_index: usize) -> SourcePos {
    self.assert_line_index(line_index);
    self.get_pos_from_relative_index(self.text_lines.line_end(line_index))
  }

  /// Gets the 0-indexed line and column index of the provided byte position.
  ///
  /// Note that this will panic when providing a byte position outside
  /// the range of the source text.
  pub fn line_and_column_index(&self, pos: SourcePos) -> LineAndColumnIndex {
    self.assert_pos(pos);
    self.text_lines.line_and_column_index(self.get_relative_index_from_pos(pos))
  }

  /// Gets the 1-indexed line and column index of the provided byte position
  /// taking into account the default indentation width.
  ///
  /// Note that this will panic when providing a byte position outside
  /// the range of the source text.
  pub fn line_and_column_display(&self, pos: SourcePos) -> LineAndColumnDisplay {
    self.assert_pos(pos);
    self.text_lines.line_and_column_display(self.get_relative_index_from_pos(pos))
  }

  /// Gets the 1-indexed line and column index of the provided byte position
  /// with a custom indentation width.
  ///
  /// Note that this will panic when providing a byte position outside
  /// the range of the source text.
  pub fn line_and_column_display_with_indent_width(&self, pos: SourcePos, indent_width: usize) -> LineAndColumnDisplay {
    self.assert_pos(pos);
    self
      .text_lines
      .line_and_column_display_with_indent_width(self.get_relative_index_from_pos(pos), indent_width)
  }

  /// Gets the source position of the provided line and column index.
  ///
  /// Note that this will panic if providing a line index outside the
  /// bounds of the number of lines, but will clip the the line end byte index
  /// when exceeding the line length.
  pub fn loc_to_source_pos(&self, line_and_column_index: LineAndColumnIndex) -> SourcePos {
    self.assert_line_index(line_and_column_index.line_index);
    self.get_pos_from_relative_index(self.text_lines.byte_index(line_and_column_index))
  }

  /// Gets a reference to the text slice of the line at the provided
  /// 0-based index.
  ///
  /// Note that this will panic if providing a line index outside the
  /// bounds of the number of lines.
  pub fn line_text(&self, line_index: usize) -> &str {
    let range = SourceRange {
      start: self.line_start(line_index),
      end: self.line_end(line_index),
    };
    self.range_text(&range)
  }

  /// Gets the source text located within the provided range.
  pub fn range_text(&self, range: &SourceRange) -> &str {
    let start = self.get_relative_index_from_pos(range.start);
    let end = self.get_relative_index_from_pos(range.end);
    &self.text_str()[start..end]
  }

  /// Panics when `pos` falls outside the source text's range.
  fn assert_pos(&self, pos: SourcePos) {
    let range = self.range();
    if pos < range.start {
      panic!("The provided position {} was less than the start position {}.", pos, range.start,);
    } else if pos > range.end {
      panic!("The provided position {} was greater than the end position {}.", pos, range.end,);
    }
  }

  /// Panics when `line_index` is out of bounds.
  fn assert_line_index(&self, line_index: usize) {
    if line_index >= self.lines_count() {
      panic!(
        "The specified line index {} was greater or equal to the number of lines of {}.",
        line_index,
        self.lines_count()
      );
    }
  }

  /// Converts a source position to a byte index relative to the text start.
  fn get_relative_index_from_pos(&self, pos: SourcePos) -> usize {
    pos - self.start_pos
  }

  /// Converts a relative byte index back to an absolute source position.
  fn get_pos_from_relative_index(&self, relative_index: usize) -> SourcePos {
    self.start_pos + relative_index
  }
}
impl std::fmt::Debug for SourceTextInfo {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // `text_lines` is derived from `text`, so it's omitted from the output
    let mut debug_struct = f.debug_struct("SourceTextInfo");
    debug_struct.field("start_pos", &self.start_pos);
    debug_struct.field("text", &self.text);
    debug_struct.finish()
  }
}
/// Trait for types that can provide a reference to a `SourceTextInfo`.
pub trait SourceTextInfoProvider<'a> {
  fn text_info(&self) -> &'a SourceTextInfo;
}
// A `&SourceTextInfo` trivially provides itself.
impl<'a> SourceTextInfoProvider<'a> for &'a SourceTextInfo {
  fn text_info(&self) -> &'a SourceTextInfo {
    self
  }
}
#[cfg(test)]
mod test {
  use super::*;

  // positions are checked against several different source start positions
  // (0..10) to make sure the relative-index math holds for any start
  #[test]
  fn line_and_column_index() {
    let text = "12\n3\r\nβ\n5";
    for i in 0..10 {
      run_with_text_info(SourceTextInfo::new_with_pos(SourcePos::new(i), text.to_string().into()), i);
    }
    fn run_with_text_info(text_info: SourceTextInfo, i: usize) {
      assert_pos_line_and_col(&text_info, i, 0, 0); // 1
      assert_pos_line_and_col(&text_info, 1 + i, 0, 1); // 2
      assert_pos_line_and_col(&text_info, 2 + i, 0, 2); // \n
      assert_pos_line_and_col(&text_info, 3 + i, 1, 0); // 3
      assert_pos_line_and_col(&text_info, 4 + i, 1, 1); // \r
      assert_pos_line_and_col(&text_info, 5 + i, 1, 2); // \n
      assert_pos_line_and_col(&text_info, 6 + i, 2, 0); // first β index
      assert_pos_line_and_col(&text_info, 7 + i, 2, 0); // second β index
      assert_pos_line_and_col(&text_info, 8 + i, 2, 1); // \n
      assert_pos_line_and_col(&text_info, 9 + i, 3, 0); // 5
      assert_pos_line_and_col(&text_info, 10 + i, 3, 1); // <EOF>
    }
  }

  // asserts that the byte position `pos` maps to the given 0-indexed line/column
  fn assert_pos_line_and_col(text_info: &SourceTextInfo, pos: usize, line_index: usize, column_index: usize) {
    assert_eq!(
      text_info.line_and_column_index(SourcePos::new(pos)),
      LineAndColumnIndex { line_index, column_index }
    );
  }

  #[test]
  #[should_panic(expected = "The provided position 0 was less than the start position 1.")]
  fn line_and_column_index_panic_less_than() {
    let info = SourceTextInfo::new_with_pos(SourcePos::new(1), "test".to_string().into());
    info.line_and_column_index(SourcePos::new(0));
  }

  #[test]
  #[should_panic(expected = "The provided position 6 was greater than the end position 5.")]
  fn line_and_column_index_panic_greater_than() {
    let info = SourceTextInfo::new_with_pos(SourcePos::new(1), "test".to_string().into());
    info.line_and_column_index(SourcePos::new(6));
  }

  #[test]
  fn line_start() {
    let text = "12\n3\r\n4\n5";
    for i in 0..10 {
      run_with_text_info(SourceTextInfo::new_with_pos(SourcePos::new(i), text.to_string().into()), i);
    }
    fn run_with_text_info(text_info: SourceTextInfo, i: usize) {
      assert_line_start(&text_info, 0, i);
      assert_line_start(&text_info, 1, 3 + i);
      assert_line_start(&text_info, 2, 6 + i);
      assert_line_start(&text_info, 3, 8 + i);
    }
  }

  // asserts the start position of the given line
  // (parameter renamed from the misleading `line_end` — it holds the
  // expected line START position)
  fn assert_line_start(text_info: &SourceTextInfo, line_index: usize, expected_line_start: usize) {
    assert_eq!(text_info.line_start(line_index), SourcePos::new(expected_line_start));
  }

  #[test]
  #[should_panic(expected = "The specified line index 1 was greater or equal to the number of lines of 1.")]
  fn line_start_equal_number_lines() {
    let info = SourceTextInfo::new_with_pos(SourcePos::new(1), "test".to_string().into());
    info.line_start(1);
  }

  #[test]
  fn line_end() {
    let text = "12\n3\r\n4\n5";
    for i in 0..10 {
      run_with_text_info(SourceTextInfo::new_with_pos(SourcePos::new(i), text.to_string().into()), i);
    }
    fn run_with_text_info(text_info: SourceTextInfo, i: usize) {
      assert_line_end(&text_info, 0, 2 + i);
      assert_line_end(&text_info, 1, 4 + i);
      assert_line_end(&text_info, 2, 7 + i);
      assert_line_end(&text_info, 3, 9 + i);
    }
  }

  // asserts the end position of the given line
  fn assert_line_end(text_info: &SourceTextInfo, line_index: usize, line_end: usize) {
    assert_eq!(text_info.line_end(line_index), SourcePos::new(line_end));
  }

  #[test]
  #[should_panic(expected = "The specified line index 1 was greater or equal to the number of lines of 1.")]
  fn line_end_equal_number_lines() {
    let info = SourceTextInfo::new_with_pos(SourcePos::new(1), "test".to_string().into());
    info.line_end(1);
  }
}

View file

@ -0,0 +1,147 @@
use rustc_hash::FxHashMap;
use super::pos::*;
use crate::swc::parser::token::TokenAndSpan;
/// Holds the parsed tokens along with position-to-index lookups for
/// quickly resolving tokens by their start or end position.
pub struct TokenContainer<'a> {
  pub tokens: &'a [TokenAndSpan],
  // Uses an FxHashMap because it has faster lookups for u32 keys than the default hasher.
  start_to_index: FxHashMap<SourcePos, usize>,
  end_to_index: FxHashMap<SourcePos, usize>,
}
impl<'a> TokenContainer<'a> {
  /// Creates a token container, indexing every token by its start
  /// and end position.
  pub fn new(tokens: &'a [TokenAndSpan]) -> Self {
    TokenContainer {
      tokens,
      start_to_index: tokens.iter().enumerate().map(|(i, token)| (token.start(), i)).collect(),
      end_to_index: tokens.iter().enumerate().map(|(i, token)| (token.end(), i)).collect(),
    }
  }

  /// Gets the index of the token that starts exactly at `start`, if any.
  pub fn get_token_index_at_start(&self, start: SourcePos) -> Option<usize> {
    self.start_to_index.get(&start).copied()
  }

  /// Gets the index of the token that ends exactly at `end`, if any.
  pub fn get_token_index_at_end(&self, end: SourcePos) -> Option<usize> {
    self.end_to_index.get(&end).copied()
  }

  /// Gets the token at the provided index, if it exists.
  pub fn get_token_at_index(&self, index: usize) -> Option<&TokenAndSpan> {
    self.tokens.get(index)
  }

  /// Gets the slice of tokens between `start` and `end`.
  pub fn get_tokens_in_range(&self, start: SourcePos, end: SourcePos) -> &'a [TokenAndSpan] {
    let start_index = self.get_leftmost_token_index(start);
    let end_index = self.get_rightmost_token_index(end);
    // fall back when a range boundary doesn't line up with any token
    let start_index = start_index.unwrap_or_else(|| end_index.unwrap_or(0));
    let end_index = end_index.map(|i| i + 1).unwrap_or(start_index);
    &self.tokens[start_index..end_index]
  }

  /// Gets the index of the first token at or after `start`.
  fn get_leftmost_token_index(&self, start: SourcePos) -> Option<usize> {
    if let Some(&start_index) = self.start_to_index.get(&start) {
      Some(start_index)
    // fallback
    } else if let Some(&start_index) = self.end_to_index.get(&start) {
      // `start` is a token's end, so the range begins at the next token
      Some(start_index + 1)
    } else {
      // todo: binary search leftmost
      for (i, token) in self.tokens.iter().enumerate() {
        if token.start() >= start {
          return Some(i);
        }
      }
      None
    }
  }

  /// Gets the index of the last token at or before `end`.
  fn get_rightmost_token_index(&self, end: SourcePos) -> Option<usize> {
    if let Some(&end_index) = self.end_to_index.get(&end) {
      Some(end_index)
    // fallback
    } else if let Some(&end_index) = self.start_to_index.get(&end) {
      // `end` is a token's start, so the range ends at the previous token
      if end_index > 0 {
        Some(end_index - 1)
      } else {
        None
      }
    } else {
      // todo: binary search rightmost
      for (i, token) in self.tokens.iter().enumerate().rev() {
        if token.end() <= end {
          return Some(i);
        }
      }
      None
    }
  }

  /// Gets the token immediately before the one starting at `start`,
  /// or the last token ending at or before `start` when `start` is not
  /// a token boundary.
  pub fn get_previous_token(&self, start: SourcePos) -> Option<&TokenAndSpan> {
    let index = self.start_to_index.get(&start);
    if let Some(&index) = index {
      if index == 0 {
        None
      } else {
        Some(&self.tokens[index - 1])
      }
    } else {
      // todo: binary search leftmost
      let mut last_token = None;
      for token in self.tokens {
        if token.end() > start {
          return last_token;
        } else {
          last_token = Some(token);
        }
      }
      None
    }
  }

  /// Gets the token immediately after the one ending at `end`,
  /// or the first token starting after `end` when `end` is not a
  /// token boundary.
  pub fn get_next_token(&self, end: SourcePos) -> Option<&TokenAndSpan> {
    if let Some(index) = self.end_to_index.get(&end) {
      self.tokens.get(index + 1)
    } else {
      // todo: binary search rightmost
      for token in self.tokens {
        if token.start() > end {
          return Some(token);
        }
      }
      None
    }
  }
}
#[cfg(test)]
mod test {
  use std::path::PathBuf;
  use super::super::pos::SourcePos;
  use super::TokenContainer;
  use crate::common::SourceRangedForSpanned;
  use crate::test_helpers::*;
  #[test]
  fn get_next_token() {
    // In `let /* a */ a = 5;` the identifier token `a` starts at position 12.
    let (_, tokens, _, _) = get_swc_module(&PathBuf::from("path.js"), r#"let /* a */ a = 5;"#);
    let token_container = TokenContainer::new(&tokens);
    // low token of previous token
    assert_eq!(token_container.get_next_token(SourcePos::new(0)).unwrap().start(), SourcePos::new(12));
    // hi of previous token
    assert_eq!(token_container.get_next_token(SourcePos::new(3)).unwrap().start(), SourcePos::new(12));
    // in comment before token
    assert_eq!(token_container.get_next_token(SourcePos::new(5)).unwrap().start(), SourcePos::new(12));
    // in whitespace before token
    assert_eq!(token_container.get_next_token(SourcePos::new(11)).unwrap().start(), SourcePos::new(12));
    // at hi of last token
    assert_eq!(token_container.get_next_token(SourcePos::new(18)), None);
  }
}

View file

@ -0,0 +1,41 @@
use crate::swc::parser::token::TokenAndSpan;
use super::comments::CommentContainer;
use super::text_info::*;
use super::tokens::TokenContainer;
/// A Module or Script node.
///
/// Implemented by the root of an AST view. The `maybe_*` accessors return
/// `None` when the corresponding data was not provided to `with_view`; the
/// panicking accessors are conveniences for when the data is known to exist.
pub trait RootNode<'a> {
  fn maybe_text_info(&self) -> Option<&'a SourceTextInfo>;
  fn maybe_token_container(&self) -> Option<&'a TokenContainer<'a>>;
  fn maybe_comment_container(&self) -> Option<&'a CommentContainer<'a>>;
  /// Token at `index`, or `None` when out of bounds.
  ///
  /// Panics when tokens were not provided to `with_view`.
  fn token_at_index(&self, index: usize) -> Option<&'a TokenAndSpan> {
    self.token_container().get_token_at_index(index)
  }
  /// The token container.
  ///
  /// Panics when tokens were not provided to `with_view`.
  fn token_container(&self) -> &'a TokenContainer<'a> {
    // Note: no `.as_ref()` needed — the option already holds a `&'a` reference,
    // matching the style of the `SourceTextInfoProvider` blanket impl below.
    self
      .maybe_token_container()
      .expect("The tokens must be provided to `with_view` in order to use this method.")
  }
  /// The comment container.
  ///
  /// Panics when comments were not provided to `with_view`.
  fn comment_container(&self) -> &'a CommentContainer<'a> {
    self
      .maybe_comment_container()
      .expect("The comments must be provided to `with_view` in order to use this method.")
  }
}
// Blanket impl: any root node can provide its source text info,
// panicking when the source file was not supplied to `with_view`.
impl<'a, T> SourceTextInfoProvider<'a> for T
where
  T: RootNode<'a>,
{
  fn text_info(&self) -> &'a SourceTextInfo {
    self
      .maybe_text_info()
      .expect("The source file must be provided to `with_view` in order to use this method.")
  }
}

View file

@ -0,0 +1,15 @@
pub mod common;
#[cfg(test)]
pub(crate) mod test_helpers;
#[cfg(feature = "view")]
pub mod view;
/// swc re-exports
///
/// Re-exported so consumers use the exact swc crate versions this
/// crate was built against instead of depending on them directly.
pub mod swc {
  pub use swc_atoms as atoms;
  pub use swc_common as common;
  pub use swc_ecma_ast as ast;
  pub use swc_ecma_parser as parser;
}

View file

@ -0,0 +1,112 @@
use crate::swc::ast::EsVersion;
use crate::swc::ast::Module;
use crate::swc::common::comments::SingleThreadedComments;
use crate::swc::common::errors::DiagnosticBuilder;
use crate::swc::common::errors::Emitter;
use crate::swc::common::errors::Handler;
use crate::swc::parser::lexer::Lexer;
use crate::swc::parser::token::TokenAndSpan;
use crate::swc::parser::Capturing;
use crate::swc::parser::Parser;
use crate::swc::parser::Syntax;
use std::path::Path;
use crate::common::SourceTextInfo;
/// Parses `file_text` as a TypeScript module and returns the module together
/// with its tokens, source text info, and comments.
///
/// # Panics
///
/// Panics (via `unwrap`) when the text fails to parse as a module.
pub fn get_swc_module(file_path: &Path, file_text: &str) -> (Module, Vec<TokenAndSpan>, SourceTextInfo, SingleThreadedComments) {
  // lifted from dprint-plugin-typescript
  let handler = Handler::with_emitter(false, false, Box::new(EmptyEmitter {}));
  let source_text_info = SourceTextInfo::from_string(file_text.to_string());
  let comments: SingleThreadedComments = Default::default();
  // TSX parsing is chosen from the file extension.
  let syntax = Syntax::Typescript(crate::swc::parser::TsConfig {
    tsx: should_parse_as_jsx(file_path),
    decorators: true,
    ..Default::default()
  });
  let lexer = Capturing::new(Lexer::new(
    syntax,
    EsVersion::Es2022,
    source_text_info.as_string_input(),
    Some(&comments),
  ));
  let mut parser = Parser::new_from(lexer);
  let parse_module_result = parser.parse_module();
  let tokens = parser.input().take();
  let parse_result = match parse_module_result {
    Err(error) => {
      // mark the diagnostic as being handled (otherwise it will panic in its drop)
      let mut diagnostic = error.into_diagnostic(&handler);
      diagnostic.cancel();
      // return the formatted diagnostic string
      Err(diagnostic.message())
    }
    Ok(module) => Ok((module, tokens, source_text_info, comments)),
  };
  parse_result.unwrap()
}
#[cfg(feature = "view")]
/// Parses `file_text` as a TypeScript script (no module syntax) and returns
/// the script together with its tokens, source text info, and comments.
///
/// # Panics
///
/// Panics (via `unwrap`) when the text fails to parse as a script.
pub fn get_swc_script(file_path: &Path, file_text: &str) -> (crate::swc::ast::Script, Vec<TokenAndSpan>, SourceTextInfo, SingleThreadedComments) {
  // lifted from dprint-plugin-typescript
  let handler = Handler::with_emitter(false, false, Box::new(EmptyEmitter {}));
  let source_text_info = SourceTextInfo::from_string(file_text.to_string());
  let comments: SingleThreadedComments = Default::default();
  // TSX parsing is chosen from the file extension.
  let syntax = Syntax::Typescript(crate::swc::parser::TsConfig {
    tsx: should_parse_as_jsx(file_path),
    decorators: true,
    ..Default::default()
  });
  let lexer = Capturing::new(Lexer::new(
    syntax,
    EsVersion::Es2022,
    source_text_info.as_string_input(),
    Some(&comments),
  ));
  let mut parser = Parser::new_from(lexer);
  let parse_script_result = parser.parse_script();
  let tokens = parser.input().take();
  let parse_result = match parse_script_result {
    Err(error) => {
      // mark the diagnostic as being handled (otherwise it will panic in its drop)
      let mut diagnostic = error.into_diagnostic(&handler);
      diagnostic.cancel();
      // return the formatted diagnostic string
      Err(diagnostic.message())
    }
    Ok(script) => Ok((script, tokens, source_text_info, comments)),
  };
  parse_result.unwrap()
}
/// Whether the file should be parsed with JSX enabled, based on its extension.
/// Files without an extension default to JSX parsing.
fn should_parse_as_jsx(file_path: &Path) -> bool {
  match get_lowercase_extension(file_path) {
    Some(extension) => matches!(extension.as_str(), "tsx" | "jsx" | "js" | "mjs"),
    None => true,
  }
}
/// The file's extension lowercased, or `None` when there is no extension
/// or it is not valid UTF-8.
fn get_lowercase_extension(file_path: &Path) -> Option<String> {
  let extension = file_path.extension()?.to_str()?;
  Some(extension.to_lowercase())
}
/// A diagnostics emitter that discards everything, used so parse errors
/// in test helpers don't print to stderr.
pub struct EmptyEmitter {}
impl Emitter for EmptyEmitter {
  fn emit(&mut self, _: &DiagnosticBuilder<'_>) {
    // for now, we don't care about diagnostics so do nothing
  }
  fn should_show_explain(&self) -> bool {
    false
  }
}

View file

@ -0,0 +1,24 @@
use super::generated::BindingIdent;
use super::generated::Ident;
use crate::swc::ast::Id;
use crate::swc::common::SyntaxContext;
impl<'a> Ident<'a> {
  /// The swc `Id` for this identifier: its symbol paired with its syntax context.
  pub fn to_id(&self) -> Id {
    (self.sym().clone(), self.ctxt())
  }
  /// The hygiene/syntax context stored on the underlying swc node's span.
  pub fn ctxt(&self) -> SyntaxContext {
    self.inner.span.ctxt
  }
}
impl<'a> BindingIdent<'a> {
  /// The swc `Id` of the inner identifier.
  pub fn to_id(&self) -> Id {
    self.id.to_id()
  }
  /// The syntax context of the inner identifier.
  pub fn ctxt(&self) -> SyntaxContext {
    self.id.ctxt()
  }
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,29 @@
mod custom;
mod types;
#[cfg(test)]
mod test_helpers;
#[allow(clippy::all)]
#[rustfmt::skip]
mod generated;
pub use custom::*;
pub use generated::*;
pub use types::*;
#[cfg(test)]
mod test {
  use crate::common::SourceRanged;
  use crate::view::test_helpers::*;
  #[test]
  fn trailing_comments_start_of_file_no_match() {
    // Asking for trailing comments at the very start of the file
    // should yield nothing, even when a comment exists later on.
    run_test(r#"5 // test"#, |program| {
      // previously there was a bug here where it would return the
      // comments after the token
      let trailing_comments = program.range().start.trailing_comments_fast(program);
      assert!(trailing_comments.is_empty());
    });
  }
}

View file

@ -0,0 +1,44 @@
use std::path::Path;
use crate::test_helpers::get_swc_module;
use crate::test_helpers::get_swc_script;
/// Runs `run_test` twice: once with the text parsed as a module and once
/// parsed as a script, so tests exercise both `Program` variants.
pub fn run_test<'a>(file_text: &str, run_test: impl Fn(super::Program<'a>)) {
  let file_path = Path::new("test.ts");
  run_test_with_module(file_path, file_text, |module| run_test(super::Program::Module(module)));
  run_test_with_script(file_path, file_text, |script| run_test(super::Program::Script(script)));
}
/// Parses `file_text` as a module, builds a full `ModuleInfo` (text info,
/// tokens, and comments all provided), and invokes `run_test` with the view.
pub fn run_test_with_module<'a>(file_path: &Path, file_text: &str, run_test: impl Fn(&'a super::Module<'a>)) {
  let (module, tokens, text_info, comments) = get_swc_module(file_path, file_text);
  let (leading, trailing) = comments.borrow_all();
  let info = super::ModuleInfo {
    module: &module,
    text_info: Some(&text_info),
    tokens: Some(&tokens),
    comments: Some(super::Comments {
      leading: &leading,
      trailing: &trailing,
    }),
  };
  super::with_ast_view_for_module(info, |module| {
    run_test(module);
  });
}
/// Parses `file_text` as a script, builds a full `ScriptInfo` (text info,
/// tokens, and comments all provided), and invokes `run_test` with the view.
pub fn run_test_with_script<'a>(file_path: &Path, file_text: &str, run_test: impl Fn(&'a super::Script<'a>)) {
  let (script, tokens, text_info, comments) = get_swc_script(file_path, file_text);
  let (leading, trailing) = comments.borrow_all();
  let info = super::ScriptInfo {
    script: &script,
    text_info: Some(&text_info),
    tokens: Some(&tokens),
    comments: Some(super::Comments {
      leading: &leading,
      trailing: &trailing,
    }),
  };
  super::with_ast_view_for_script(info, |script| {
    run_test(script);
  });
}

View file

@ -0,0 +1,531 @@
use super::generated::*;
use crate::common::*;
use crate::swc::ast as swc_ast;
use crate::swc::common::comments::SingleThreadedCommentsMapInner;
use crate::swc::parser::token::TokenAndSpan;
/// Either an AST node or a lexer token, as produced when interleaving a
/// node's children with the tokens between them.
pub enum NodeOrToken<'a> {
  Node(Node<'a>),
  Token(&'a TokenAndSpan),
}
impl<'a> NodeOrToken<'a> {
  /// The token, panicking with the node's kind when this is a node.
  pub fn unwrap_token(&self) -> &'a TokenAndSpan {
    match self {
      NodeOrToken::Token(token) => token,
      NodeOrToken::Node(node) => panic!("Expected to unwrap a token, but it was a node of kind {}.", node.kind()),
    }
  }
  /// The node, panicking with the token's debug text when this is a token.
  pub fn unwrap_node(&self) -> &Node<'a> {
    match self {
      NodeOrToken::Node(node) => node,
      NodeOrToken::Token(token) => panic!("Expected to unwrap a node, but it was a token with text '{:?}'.", token.token),
    }
  }
}
// Delegates the source range to whichever variant is held.
impl<'a> SourceRanged for NodeOrToken<'a> {
  fn start(&self) -> SourcePos {
    match self {
      NodeOrToken::Node(node) => node.start(),
      NodeOrToken::Token(token) => token.start(),
    }
  }
  fn end(&self) -> SourcePos {
    match self {
      NodeOrToken::Node(node) => node.end(),
      NodeOrToken::Token(token) => token.end(),
    }
  }
}
// Implements `RootNode` for a type (and its reference) whose fields
// `text_info`, `tokens`, and `comments` are the optional containers.
macro_rules! implement_root_node {
  ($name:ty) => {
    impl<'a> RootNode<'a> for $name {
      fn maybe_text_info(&self) -> Option<&'a SourceTextInfo> {
        self.text_info
      }
      fn maybe_token_container(&self) -> Option<&'a TokenContainer<'a>> {
        self.tokens
      }
      fn maybe_comment_container(&self) -> Option<&'a CommentContainer<'a>> {
        self.comments
      }
    }
  };
}
// Both the owned view structs and references to them act as root nodes.
implement_root_node!(Module<'a>);
implement_root_node!(&Module<'a>);
implement_root_node!(Script<'a>);
implement_root_node!(&Script<'a>);
/// A Module or Script node.
///
/// The root of an AST view; wraps whichever of the two swc root kinds
/// the view was created from.
#[derive(Clone, Copy)]
pub enum Program<'a> {
  Module(&'a Module<'a>),
  Script(&'a Script<'a>),
}
// Delegates the source range to the wrapped root node.
impl<'a> SourceRanged for Program<'a> {
  fn start(&self) -> SourcePos {
    match self {
      Program::Module(node) => node.start(),
      Program::Script(node) => node.start(),
    }
  }
  fn end(&self) -> SourcePos {
    match self {
      Program::Module(node) => node.end(),
      Program::Script(node) => node.end(),
    }
  }
}
impl<'a> NodeTrait<'a> for Program<'a> {
  // A program is the root, so it never has a parent.
  fn parent(&self) -> Option<Node<'a>> {
    None
  }
  fn children(&self) -> Vec<Node<'a>> {
    match self {
      Program::Module(node) => node.children(),
      Program::Script(node) => node.children(),
    }
  }
  fn as_node(&self) -> Node<'a> {
    match self {
      Program::Module(node) => node.as_node(),
      Program::Script(node) => node.as_node(),
    }
  }
  fn kind(&self) -> NodeKind {
    match self {
      Program::Module(node) => node.kind(),
      Program::Script(node) => node.kind(),
    }
  }
}
// Conversions into the generic `Node` enum, by value and by reference
// (`Program` is `Copy`, so the reference impl just copies it out).
impl<'a> From<&Program<'a>> for Node<'a> {
  fn from(node: &Program<'a>) -> Node<'a> {
    match node {
      Program::Module(node) => (*node).into(),
      Program::Script(node) => (*node).into(),
    }
  }
}
impl<'a> From<Program<'a>> for Node<'a> {
  fn from(node: Program<'a>) -> Node<'a> {
    match node {
      Program::Module(node) => node.into(),
      Program::Script(node) => node.into(),
    }
  }
}
// Delegates the root-node containers to the wrapped module or script.
impl<'a> RootNode<'a> for Program<'a> {
  fn maybe_text_info(&self) -> Option<&'a SourceTextInfo> {
    match self {
      Program::Module(module) => module.text_info,
      Program::Script(script) => script.text_info,
    }
  }
  fn maybe_token_container(&self) -> Option<&'a TokenContainer<'a>> {
    match self {
      Program::Module(module) => module.tokens,
      Program::Script(script) => script.tokens,
    }
  }
  fn maybe_comment_container(&self) -> Option<&'a CommentContainer<'a>> {
    match self {
      Program::Module(module) => module.comments,
      Program::Script(script) => script.comments,
    }
  }
}
/// Common navigation and position helpers shared by every AST view node.
///
/// Methods without a `_fast` suffix walk up to the root to find the
/// `Program` first; the `_fast` variants take the root as an argument to
/// avoid that walk when the caller already has it.
pub trait NodeTrait<'a>: SourceRanged + Sized {
  fn parent(&self) -> Option<Node<'a>>;
  fn children(&self) -> Vec<Node<'a>>;
  fn as_node(&self) -> Node<'a>;
  fn kind(&self) -> NodeKind;
  /// Iterates the ancestors from the parent up to the root.
  fn ancestors(&self) -> AncestorIterator<'a> {
    AncestorIterator::new(self.as_node())
  }
  fn start_line(&self) -> usize {
    self.start_line_fast(self.program())
  }
  fn end_line(&self) -> usize {
    self.end_line_fast(self.program())
  }
  fn start_column(&self) -> usize {
    self.start_column_fast(self.program())
  }
  fn end_column(&self) -> usize {
    self.end_column_fast(self.program())
  }
  fn char_width(&self) -> usize {
    self.char_width_fast(self.program())
  }
  /// This node's position within its parent's children
  /// (0 for the root, which has no parent).
  fn child_index(&self) -> usize {
    if let Some(parent) = self.parent() {
      let start_pos = self.start();
      // Children are matched by start position rather than identity.
      for (i, child) in parent.children().iter().enumerate() {
        if child.start() == start_pos {
          return i;
        }
      }
      panic!("Could not find the child index for some reason.");
    } else {
      0
    }
  }
  /// Gets the previous sibling, if any.
  fn previous_sibling(&self) -> Option<Node<'a>> {
    if let Some(parent) = self.parent() {
      let child_index = self.child_index();
      if child_index > 0 {
        Some(parent.children().remove(child_index - 1))
      } else {
        None
      }
    } else {
      None
    }
  }
  /// Gets the previous siblings in the order they appear in the file.
  fn previous_siblings(&self) -> Vec<Node<'a>> {
    if let Some(parent) = self.parent() {
      let child_index = self.child_index();
      if child_index > 0 {
        let mut parent_children = parent.children();
        // Drop this node and everything after it, keeping [0, child_index).
        parent_children.drain(child_index..);
        parent_children
      } else {
        Vec::new()
      }
    } else {
      Vec::new()
    }
  }
  /// Gets the next sibling, if any.
  fn next_sibling(&self) -> Option<Node<'a>> {
    if let Some(parent) = self.parent() {
      let next_index = self.child_index() + 1;
      let mut parent_children = parent.children();
      if next_index < parent_children.len() {
        Some(parent_children.remove(next_index))
      } else {
        None
      }
    } else {
      None
    }
  }
  /// Gets the next siblings in the order they appear in the file.
  fn next_siblings(&self) -> Vec<Node<'a>> {
    if let Some(parent) = self.parent() {
      let next_index = self.child_index() + 1;
      let mut parent_children = parent.children();
      if next_index < parent_children.len() {
        // Drop this node and everything before it, keeping [next_index, len).
        parent_children.drain(0..next_index);
        parent_children
      } else {
        Vec::new()
      }
    } else {
      Vec::new()
    }
  }
  /// Gets the tokens within this node's range.
  fn tokens(&self) -> &'a [TokenAndSpan] {
    self.tokens_fast(self.program())
  }
  fn children_with_tokens(&self) -> Vec<NodeOrToken<'a>> {
    self.children_with_tokens_fast(self.program())
  }
  /// Gets the child nodes interleaved with the tokens that fall between
  /// them, in source order. Tokens inside a child are skipped.
  fn children_with_tokens_fast(&self, program: impl RootNode<'a>) -> Vec<NodeOrToken<'a>> {
    let children = self.children();
    let tokens = self.tokens_fast(program);
    let mut result = Vec::new();
    let mut tokens_index = 0;
    for child in children {
      let child_range = child.range();
      // get the tokens before the current child
      for token in &tokens[tokens_index..] {
        if token.start() < child_range.start {
          result.push(NodeOrToken::Token(token));
          tokens_index += 1;
        } else {
          break;
        }
      }
      // push current child
      result.push(NodeOrToken::Node(child));
      // skip past all the tokens within the child
      for token in &tokens[tokens_index..] {
        if token.end() <= child_range.end {
          tokens_index += 1;
        } else {
          break;
        }
      }
    }
    // get the tokens after the children
    for token in &tokens[tokens_index..] {
      result.push(NodeOrToken::Token(token));
    }
    result
  }
  fn leading_comments(&self) -> CommentsIterator<'a> {
    self.leading_comments_fast(self.program())
  }
  fn trailing_comments(&self) -> CommentsIterator<'a> {
    self.trailing_comments_fast(self.program())
  }
  /// Gets the root node.
  fn program(&self) -> Program<'a> {
    let mut current: Node<'a> = self.as_node();
    while let Some(parent) = current.parent() {
      current = parent;
    }
    // the top-most node will always be a script or module
    match current {
      Node::Module(module) => Program::Module(module),
      Node::Script(script) => Program::Script(script),
      _ => panic!("Expected the root node to be a Module or Script, but it was a {}.", current.kind()),
    }
  }
  /// Gets the root node if the view was created from a Module; otherwise panics.
  fn module(&self) -> &Module<'a> {
    match self.program() {
      Program::Module(module) => module,
      Program::Script(_) => {
        panic!("The root node was a Script and not a Module. Use .script() or .program() instead.")
      }
    }
  }
  /// Gets the root node if the view was created from a Script; otherwise panics.
  fn script(&self) -> &Script<'a> {
    match self.program() {
      Program::Script(script) => script,
      Program::Module(_) => {
        panic!("The root node was a Module and not a Script. Use .module() or .program() instead.")
      }
    }
  }
  /// Gets this node's source text.
  fn text(&self) -> &'a str {
    self.text_fast(self.program())
  }
  fn previous_token(&self) -> Option<&'a TokenAndSpan> {
    self.previous_token_fast(self.program())
  }
  fn next_token(&self) -> Option<&'a TokenAndSpan> {
    self.next_token_fast(self.program())
  }
  /// Gets the previous tokens in the order they appear in the file.
  fn previous_tokens(&self) -> &'a [TokenAndSpan] {
    self.previous_tokens_fast(self.program())
  }
  /// Gets the next tokens in the order they appear in the file.
  fn next_tokens(&self) -> &'a [TokenAndSpan] {
    self.next_tokens_fast(self.program())
  }
}
/// Implemented by every concrete view node so a generic `Node` can be
/// downcast (`to`) and its kind queried without an instance.
pub trait CastableNode<'a> {
  fn to(node: &Node<'a>) -> Option<&'a Self>;
  fn kind() -> NodeKind;
}
/// Borrowed leading and trailing comment maps from swc's
/// `SingleThreadedComments`, as supplied to `with_view`.
#[derive(Clone, Copy)]
pub struct Comments<'a> {
  pub leading: &'a SingleThreadedCommentsMapInner,
  pub trailing: &'a SingleThreadedCommentsMapInner,
}
/// A reference to the underlying swc module or script (the raw AST,
/// not the view types).
#[derive(Clone, Copy)]
pub enum ProgramRef<'a> {
  Module(&'a swc_ast::Module),
  Script(&'a swc_ast::Script),
}
// Conversions from the swc root types.
impl<'a> From<&'a swc_ast::Program> for ProgramRef<'a> {
  fn from(program: &'a swc_ast::Program) -> Self {
    use swc_ast::Program;
    match program {
      Program::Module(module) => ProgramRef::Module(module),
      Program::Script(script) => ProgramRef::Script(script),
    }
  }
}
impl<'a> From<&'a swc_ast::Module> for ProgramRef<'a> {
  fn from(module: &'a swc_ast::Module) -> Self {
    ProgramRef::Module(module)
  }
}
impl<'a> From<&'a swc_ast::Script> for ProgramRef<'a> {
  fn from(script: &'a swc_ast::Script) -> Self {
    ProgramRef::Script(script)
  }
}
// Delegates the source range to the wrapped swc node's range.
impl<'a> SourceRanged for ProgramRef<'a> {
  fn start(&self) -> SourcePos {
    match self {
      ProgramRef::Module(node) => node.range().start,
      ProgramRef::Script(node) => node.range().start,
    }
  }
  fn end(&self) -> SourcePos {
    match self {
      ProgramRef::Module(node) => node.range().end,
      ProgramRef::Script(node) => node.range().end,
    }
  }
}
/// Input to `with_view` for a program; `text_info`, `tokens`, and
/// `comments` are optional and gate the corresponding view methods.
#[derive(Clone, Copy)]
pub struct ProgramInfo<'a> {
  pub program: ProgramRef<'a>,
  pub text_info: Option<&'a SourceTextInfo>,
  pub tokens: Option<&'a [TokenAndSpan]>,
  pub comments: Option<Comments<'a>>,
}
/// Same as `ProgramInfo`, but specifically for a module root.
#[derive(Clone, Copy)]
pub struct ModuleInfo<'a> {
  pub module: &'a swc_ast::Module,
  pub text_info: Option<&'a SourceTextInfo>,
  pub tokens: Option<&'a [TokenAndSpan]>,
  pub comments: Option<Comments<'a>>,
}
/// Same as `ProgramInfo`, but specifically for a script root.
#[derive(Clone, Copy)]
pub struct ScriptInfo<'a> {
  pub script: &'a swc_ast::Script,
  pub text_info: Option<&'a SourceTextInfo>,
  pub tokens: Option<&'a [TokenAndSpan]>,
  pub comments: Option<Comments<'a>>,
}
/// Iterator over a node's ancestors, yielding the parent first and
/// ending at the root (the starting node itself is not yielded).
#[derive(Clone)]
pub struct AncestorIterator<'a> {
  current: Node<'a>,
}
impl<'a> AncestorIterator<'a> {
  pub fn new(node: Node<'a>) -> AncestorIterator<'a> {
    AncestorIterator { current: node }
  }
}
impl<'a> Iterator for AncestorIterator<'a> {
  type Item = Node<'a>;
  fn next(&mut self) -> Option<Node<'a>> {
    let parent = self.current.parent();
    // Advance only while a parent exists; `None` ends the iteration.
    if let Some(parent) = parent {
      self.current = parent;
    }
    parent
  }
}
/// Extension trait for looking up a token's index in a root node's
/// token container.
pub trait TokenExt {
  fn token_index<'a, N: RootNode<'a>>(&self, root_node: N) -> usize;
}
impl TokenExt for TokenAndSpan {
  // Panics (unwrap) when the token is not in the container — i.e. when it
  // did not come from the same parse as `root_node`.
  fn token_index<'a, N: RootNode<'a>>(&self, root_node: N) -> usize {
    root_node.token_container().get_token_index_at_start(self.start()).unwrap()
  }
}
#[cfg(test)]
mod test {
  use super::super::test_helpers::run_test;
  use crate::common::*;
  use crate::view::*;
  #[test]
  fn it_should_get_children() {
    run_test("class Test { a: string; b: number; }", |program| {
      let class_decl = program.children()[0].expect::<ClassDecl>();
      let children = class_decl.class.children();
      // The two class properties are the class body's only children.
      assert_eq!(children.len(), 2);
      assert_eq!(children[0].text(), "a: string;");
      assert_eq!(children[1].text(), "b: number;");
    });
  }
  #[test]
  fn it_should_get_all_comments() {
    // Source containing all comment styles: triple-slash reference, block,
    // line, inline, trailing-on-statement, and trailing-at-end-of-file.
    run_test(
      r#"
/// <reference path="foo" />
const a = 42;
/*
 * block comment
 */
let b = true;
// line comment
let c = "";
function foo(name: /* inline comment */ string) {
  console.log(`hello, ${name}`); // greeting!
}
// trailing comment
"#,
      |program| {
        assert_eq!(program.maybe_comment_container().unwrap().all_comments().count(), 6);
      },
    );
  }
}