// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! ncurses-compatible compiled terminfo format parsing (term(5))

use std::collections::HashMap;
use std::io::prelude::*;
use std::io;

use byteorder::{LittleEndian, ReadBytesExt};

use terminfo::Error::*;
use terminfo::TermInfo;
use Result;

pub use terminfo::parser::names::*;

// These are the orders ncurses uses in its compiled format (as of 5.9). Not
// sure if portable.

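// Both helpers return `u32` so that they share one function type; `parse` picks
// one of them as `read_number` based on the magic number, since the newer
// 0x021e format stores numeric capabilities as 32-bit values.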
fn read_le_u16(r: &mut dyn io::Read) -> io::Result<u32> {
    r.read_u16::<LittleEndian>().map(|i| i as u32)
}

fn read_le_u32(r: &mut dyn io::Read) -> io::Result<u32> {
    r.read_u32::<LittleEndian>()
}

fn read_byte(r: &mut dyn io::Read) -> io::Result<u8> {
    match r.bytes().next() {
        Some(s) => s,
        None => Err(io::Error::new(io::ErrorKind::Other, "end of file")),
    }
}

/// Parse a compiled terminfo entry, using long capability names if `longnames`
/// is true.
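///
/// # Example
///
/// A rough usage sketch (kept out of the doctests); the terminfo path below is
/// illustrative and varies by system:
///
/// ```ignore
/// use std::fs::File;
///
/// let mut file = File::open("/usr/share/terminfo/x/xterm").unwrap();
/// let info = parse(&mut file, false).unwrap();
/// println!("terminal names: {:?}", info.names);
/// ```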
pub fn parse(file: &mut dyn io::Read, longnames: bool) -> Result<TermInfo> {
    let (bnames, snames, nnames) = if longnames {
        (boolfnames, stringfnames, numfnames)
    } else {
        (boolnames, stringnames, numnames)
    };

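    // A compiled entry begins with a header of six little-endian 16-bit fields:
    // the magic number, the size in bytes of the names section, the number of
    // boolean capabilities, the number of numeric capabilities, the number of
    // string offsets, and the size in bytes of the string table.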
    // Check magic number
    let magic = file.read_u16::<LittleEndian>()?;

    let read_number = match magic {
        0x011A => read_le_u16,
        0x021e => read_le_u32,
        _ => return Err(BadMagic(magic).into()),
    };

    // According to the spec, these fields must be >= -1, where -1 means that the
    // feature is not supported. Using 0 instead of -1 works because we skip
    // sections with length 0.
    macro_rules! read_nonneg {
        () => {{
            match read_le_u16(file)? as i16 {
                n if n >= 0 => n as usize,
                -1 => 0,
                _ => return Err(InvalidLength.into()),
            }
        }}
    }

    let names_bytes = read_nonneg!();
    let bools_bytes = read_nonneg!();
    let numbers_count = read_nonneg!();
    let string_offsets_count = read_nonneg!();
    let string_table_bytes = read_nonneg!();

    if names_bytes == 0 {
        return Err(ShortNames.into());
    }

    if bools_bytes > boolnames.len() {
        return Err(TooManyBools.into());
    }

    if numbers_count > numnames.len() {
        return Err(TooManyNumbers.into());
    }

    if string_offsets_count > stringnames.len() {
        return Err(TooManyStrings.into());
    }

    // don't read NUL
    let mut bytes = Vec::new();
    file.take((names_bytes - 1) as u64).read_to_end(&mut bytes)?;
    let names_str = match String::from_utf8(bytes) {
        Ok(s) => s,
        Err(e) => return Err(NotUtf8(e.utf8_error()).into()),
    };

    let term_names: Vec<String> = names_str.split('|').map(|s| s.to_owned()).collect();
    // consume NUL
    if read_byte(file)? != b'\0' {
        return Err(NamesMissingNull.into());
    }

    let bools_map = (0..bools_bytes)
        .filter_map(|i| match read_byte(file) {
            Err(e) => Some(Err(e)),
            Ok(1) => Some(Ok((bnames[i], true))),
            Ok(_) => None,
        })
        .collect::<io::Result<HashMap<_, _>>>()?;

    if (bools_bytes + names_bytes) % 2 == 1 {
        read_byte(file)?; // compensate for padding
    }

    let numbers_map = (0..numbers_count)
        .filter_map(|i| match read_number(file) {
            Ok(0xFFFF) => None,
            Ok(n) => Some(Ok((nnames[i], n))),
            Err(e) => Some(Err(e)),
        })
        .collect::<io::Result<HashMap<_, _>>>()?;

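    // The strings section is an array of 16-bit offsets into a string table of
    // NUL-terminated capability values; an offset of 0xFFFF marks a capability
    // that is absent from this entry.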
    let string_map: HashMap<&str, Vec<u8>> = if string_offsets_count > 0 {
        let string_offsets = (0..string_offsets_count)
            .map(|_| file.read_u16::<LittleEndian>())
            .collect::<io::Result<Vec<_>>>()?;

        let mut string_table = Vec::new();
        file.take(string_table_bytes as u64)
            .read_to_end(&mut string_table)?;

        string_offsets
            .into_iter()
            .enumerate()
            .filter(|&(_, offset)| {
                // non-entry
                offset != 0xFFFF
            })
            .map(|(i, offset)| {
                let offset = offset as usize;

                let name = if snames[i] == "_" {
                    stringfnames[i]
                } else {
                    snames[i]
                };

                if offset == 0xFFFE {
                    // Undocumented: an offset of 0xFFFE indicates cap@, which means
                    // the capability is not present. It is unclear whether this
                    // handling is correct.
                    return Ok((name, Vec::new()));
                }

                // Find the offset of the NUL we want to go to
                let nulpos = string_table[offset..string_table_bytes]
                    .iter()
                    .position(|&b| b == 0);
                match nulpos {
                    Some(len) => Ok((name, string_table[offset..offset + len].to_vec())),
                    None => return Err(::Error::TerminfoParsing(StringsMissingNull)),
                }
            })
            .collect::<Result<HashMap<_, _>>>()?
    } else {
        HashMap::new()
    };

    // And that's all there is to it
    Ok(TermInfo {
        names: term_names,
        bools: bools_map,
        numbers: numbers_map,
        strings: string_map,
    })
}

#[cfg(test)]
mod test {

    use super::{boolfnames, boolnames, numfnames, numnames, stringfnames, stringnames};

    #[test]
    fn test_veclens() {
        assert_eq!(boolfnames.len(), boolnames.len());
        assert_eq!(numfnames.len(), numnames.len());
        assert_eq!(stringfnames.len(), stringnames.len());
    }
}