Class: RubyLexer::StringToken
Overview
Direct Known Subclasses
Constant Summary collapse
# Pairs of [literal control character, escaped source form] used when
# re-emitting double-quoted strings (e.g. an actual newline becomes the
# two characters backslash-n).
DQUOTE_ESCAPE_TABLE =
  [ ["\n",'\n'], ["\r",'\r'], ["\t",'\t'], ["\v",'\v'], ["\f",'\f'], ["\e",'\e'], ["\b",'\b'], ["\a",'\a'] ]
# Maps a %-literal opening bracket to the full opener emitted for
# word-array output ('[' -> "%w[", '{' -> "%W{").
PREFIXERS =
  { '['=>"%w[", '{'=>'%W{' }
# Matching closing delimiter for each opener handled by PREFIXERS.
SUFFIXERS =
  { '['=>"]", '{'=>'}' }
Instance Attribute Summary collapse
-
#bs_handler ⇒ Object
Returns the value of attribute bs_handler.
-
#char ⇒ Object
readonly
Returns the value of attribute char.
-
#close ⇒ Object
exact sequence of (exactly one) character that terminates the string.
-
#elems ⇒ Object
Returns the value of attribute elems.
-
#line ⇒ Object
line on which the string ENDS.
-
#lvars ⇒ Object
names used in named backrefs if this is a regex.
-
#modifiers ⇒ Object
for regex only.
-
#open ⇒ Object
exact sequence of chars used to start the str.
-
#startline ⇒ Object
Returns the value of attribute startline.
Attributes inherited from Token
#allow_ooo_offset, #as, #ident, #offset, #tag
Instance Method Summary collapse
- #append(glob) ⇒ Object
- #append_token(strtok) ⇒ Object
- #has_str_inc? ⇒ Boolean
-
#initialize(type = '"', ident = '') ⇒ StringToken
constructor
A new instance of StringToken.
- #to_s(transname = :transform) ⇒ Object
- #to_term ⇒ Object
- #translate_escapes(str) ⇒ Object
- #with_line(line) ⇒ Object
Methods inherited from Token
#error, #has_no_block?, #ws_munge
Constructor Details
#initialize(type = '"', ident = '') ⇒ StringToken
Returns a new instance of StringToken.
# File 'lib/rubylexer/token.rb', lines 237-245
# Builds a StringToken. +type+ is the delimiter character classifying the
# literal ('"', '`', '/', '[', or '{'); a single-quote type is normalized
# to '"' since both are plain strings on output. +ident+ seeds both the
# Token ident (via super) and the first content element.
def initialize(type='"',ident='')
  super(ident)
  type=="'" and type='"'       #single- and double-quoted strings share a type
  @char=type
  assert @char[/^[\[{"`\/]$/] #"
  @elems=[ident.dup] #why .dup?
  @modifiers=nil
  @line=nil
end
Instance Attribute Details
#bs_handler ⇒ Object
Returns the value of attribute bs_handler.
# File 'lib/rubylexer/token.rb', lines 225-227
# Reader for @bs_handler: the name of the backslash-escape handler method
# invoked (via send) when translating escapes in this string.
def bs_handler
  @bs_handler
end
#char ⇒ Object (readonly)
Returns the value of attribute char.
# File 'lib/rubylexer/token.rb', lines 219-221
# Reader for @char: the single delimiter character that classifies this
# string token ('"', '`', '/', '[', or '{').
def char
  @char
end
#close ⇒ Object
exact sequence of (exactly one) character that terminates the string
# File 'lib/rubylexer/token.rb', lines 228-230
# Reader for @close: the exact one-character sequence that terminates the
# string literal.
def close
  @close
end
#elems ⇒ Object
Returns the value of attribute elems.
# File 'lib/rubylexer/token.rb', lines 222-224
# Reader for @elems: the string's contents as alternating plain-String
# fragments and embedded-code elements.
def elems
  @elems
end
#line ⇒ Object
line on which the string ENDS
# File 'lib/rubylexer/token.rb', lines 224-226
# Reader for @line: the line on which the string ENDS.
def line
  @line
end
#lvars ⇒ Object
names used in named backrefs if this is a regex
# File 'lib/rubylexer/token.rb', lines 230-232
# Reader for @lvars: names used in named backrefs if this is a regex.
def lvars
  @lvars
end
#modifiers ⇒ Object
for regex only
# File 'lib/rubylexer/token.rb', lines 221-223
# Reader for @modifiers: trailing regex flags (for regex tokens only).
def modifiers
  @modifiers
end
#open ⇒ Object
exact sequence of chars used to start the str
# File 'lib/rubylexer/token.rb', lines 227-229
# Reader for @open: the exact sequence of characters used to start the
# string literal (may be more than one char, e.g. "%w[").
def open
  @open
end
#startline ⇒ Object
Returns the value of attribute startline.
# File 'lib/rubylexer/token.rb', lines 223-225
# Reader for @startline — presumably the line on which the string begins,
# complementing #line (line on which it ends). TODO confirm against callers.
def startline
  @startline
end
Instance Method Details
#append(glob) ⇒ Object
# File 'lib/rubylexer/token.rb', lines 310-318
# Appends +glob+ to this string's contents: String or Integer data is
# routed through append_str!, embedded RubyCode through append_code!;
# anything else is a hard error naming the offending class.
def append(glob)
  #assert @elems.last.kind_of?(String)
  case glob
  when String,Integer then append_str! glob
  when RubyCode then append_code! glob
  else raise "bad string contents: #{glob}, a #{glob.class}"
  end
  #assert @elems.last.kind_of?(String)
end
#append_token(strtok) ⇒ Object
# File 'lib/rubylexer/token.rb', lines 320-341
# Merges another StringToken into this one: the other token's leading
# String fragment is glued onto our last fragment, its remaining elems and
# ident are appended, and regex modifiers / escape handler are adopted if
# we have none. Returns self. At most one of the two tokens may carry
# modifiers (asserted).
def append_token(strtok)
  assert @elems.last.kind_of?(String)
  #assert strtok.elems.last.kind_of?(String)
  assert strtok.elems.first.kind_of?(String)
  @elems.last << strtok.elems.shift
  first=strtok.elems.first
  #after shifting the leading String, the next elem (if any) must be code
  assert( first.nil? || first.kind_of?(RubyCode) )
  @elems += strtok.elems
  @ident << strtok.ident
  assert((!@modifiers or !strtok.modifiers))
  @modifiers||=strtok.modifiers
  #assert @elems.last.kind_of?(String)
  @bs_handler ||=strtok.bs_handler
  return self
end
#has_str_inc? ⇒ Boolean
# File 'lib/rubylexer/token.rb', lines 260-262
# True if this string contains interpolation: either multiple elements, or
# a single element that is embedded RubyCode rather than a plain String.
def has_str_inc?
  elems.size>1 or RubyCode===elems.first
end
#to_s(transname = :transform) ⇒ Object
264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 |
# File 'lib/rubylexer/token.rb', line 264 def to_s transname=:transform assert @char[/[\[{"`\/]/] #" #on output, all single-quoted strings become double-quoted assert(@elems.length==1) if @char=='[' result=open.dup starter=result[-1,1] ender=close elems.each{|e| case e when String; result<<e # strfrag=translate_escapes strfrag if RubyLexer::FASTER_STRING_ESCAPES # result << send(transname,strfrag,starter,ender) when VarNameToken; if /^[$@]/===e.to_s result << '#' + e.to_s else result << "\#{#{e}}" end when RubyCode; result << '#' + e.to_s else fail end } result << ender if @char=='/' result << modifiers if modifiers #regex only result="%r"+result if RubyLexer::WHSPLF[result[1,1]] end return result end |
#to_term ⇒ Object
# File 'lib/rubylexer/token.rb', lines 297-308
# Converts the alternating elems list (String at even indexes, RubyCode at
# odd indexes) into a flat list of ConstTerm / RubyTerm objects.
def to_term
  result=[]
  0.step(@elems.length-1,2) { |i|
    result << ConstTerm.new(@elems[i].dup)
    if e=@elems[i+1]
      assert(e.kind_of?(RubyCode))  #odd slots must be embedded code
      result << (RubyTerm.new e)
    end
  }
  return result
end
#translate_escapes(str) ⇒ Object
# File 'lib/rubylexer/token.rb', lines 343-359
# Expands backslash escape sequences in +str+ in place (on a copy) by
# feeding each "\x" occurrence through this token's bs_handler on a
# throwaway RubyLexer whose input is hacked to be the string itself.
# Returns the translated copy.
def translate_escapes(str)
  rl=RubyLexer.new("(string escape translation hack...)",'')
  result=str.dup
  seq=result.to_sequence
  rl.instance_eval{@file=seq}  #point the scratch lexer at our string
  repls=[]  #NOTE(review): appears unused below — confirm before removing
  i=0
  #ugly ugly ugly
  while i<result.size and bs_at=result.index(/\\./m,i)
    seq.pos=$~.end(0)-1  #position the lexer just past the backslash
    ch=rl.send(bs_handler,"\\",@open[-1,1],@close)
    result[bs_at...seq.pos]=ch  #splice the expansion over the escape seq
    i=bs_at+ch.size
  end
  return result
end
#with_line(line) ⇒ Object
# File 'lib/rubylexer/token.rb', lines 232-235
# Records the line on which the string ends and returns self, allowing the
# call to be chained at token-construction sites.
def with_line(line)
  @line=line
  self
end