diff --git a/bootscripts/lfs/init.d/checkfs b/bootscripts/lfs/init.d/checkfs index 584921983..0903a0111 100644 --- a/bootscripts/lfs/init.d/checkfs +++ b/bootscripts/lfs/init.d/checkfs @@ -34,7 +34,7 @@ # Default-Start: S # Default-Stop: # Short-Description: Checks local filesystems before mounting. -# Description: Checks local filesystmes before mounting. +# Description: Checks local filesystems before mounting. # X-LFS-Provided-By: LFS ### END INIT INFO diff --git a/bootscripts/lfs/init.d/mountfs b/bootscripts/lfs/init.d/mountfs index ea413392e..cd9407dc0 100644 --- a/bootscripts/lfs/init.d/mountfs +++ b/bootscripts/lfs/init.d/mountfs @@ -55,7 +55,7 @@ case "${1}" in stop) # Don't unmount virtual file systems like /run log_info_msg "Unmounting all other currently mounted file systems..." - # Ensure any loop devies are removed + # Ensure any loop devices are removed losetup -D umount --all --detach-loop --read-only \ --types notmpfs,nosysfs,nodevtmpfs,noproc,nodevpts >/dev/null diff --git a/bootscripts/lfs/init.d/rc b/bootscripts/lfs/init.d/rc index 7dd503a07..dd855a71b 100644 --- a/bootscripts/lfs/init.d/rc +++ b/bootscripts/lfs/init.d/rc @@ -183,7 +183,7 @@ fi # Start all services marked as S in this runlevel, except if marked as # S in the previous runlevel -# it is the responsabily of the script to not try to start an already running +# it is the responsibility of the script to not try to start an already running # service for i in $( ls -v /etc/rc.d/rc${runlevel}.d/S* 2> /dev/null) do diff --git a/bootscripts/lfs/init.d/template b/bootscripts/lfs/init.d/template index 0a7872d98..4b3c4642a 100644 --- a/bootscripts/lfs/init.d/template +++ b/bootscripts/lfs/init.d/template @@ -45,7 +45,7 @@ case "${1}" in # if it is possible to use killproc killproc fully_qualified_path # if it is not possible to use killproc - # (the daemon shoudn't be stopped by killing it) + # (the daemon shouldn't be stopped by killing it) if pidofproc daemon_name_as_reported_by_ps 
>/dev/null; then command_to_stop_the_service fi diff --git a/bootscripts/lfs/lib/services/init-functions b/bootscripts/lfs/lib/services/init-functions index ece4d79aa..a86a23d86 100644 --- a/bootscripts/lfs/lib/services/init-functions +++ b/bootscripts/lfs/lib/services/init-functions @@ -155,7 +155,7 @@ start_daemon() fi # Return a value ONLY - # It is the init script's (or distribution's functions) responsibilty + # It is the init script's (or distribution's functions) responsibility # to log messages! case "${retval}" in @@ -271,7 +271,7 @@ killproc() fi # Return a value ONLY - # It is the init script's (or distribution's functions) responsibilty + # It is the init script's (or distribution's functions) responsibility # to log messages! case "${retval}" in diff --git a/bootscripts/lfs/sysconfig/createfiles b/bootscripts/lfs/sysconfig/createfiles index 378fa3ee7..bb744778f 100644 --- a/bootscripts/lfs/sysconfig/createfiles +++ b/bootscripts/lfs/sysconfig/createfiles @@ -21,7 +21,7 @@ # dev creates a new device # is either block, char or pipe # block creates a block device -# char creates a character deivce +# char creates a character device # pipe creates a pipe, this will ignore the and # fields # and are the major and minor numbers used for diff --git a/bootscripts/lfs/sysconfig/rc.site b/bootscripts/lfs/sysconfig/rc.site index 903aebff5..56b6f8d9b 100644 --- a/bootscripts/lfs/sysconfig/rc.site +++ b/bootscripts/lfs/sysconfig/rc.site @@ -32,7 +32,7 @@ #FAILURE_PREFIX="${FAILURE}*****${NORMAL} " #WARNING_PREFIX="${WARNING} *** ${NORMAL} " -# Manually seet the right edge of message output (characters) +# Manually set the right edge of message output (characters) # Useful when resetting console font during boot to override # automatic screen width detection #COLUMNS=120 diff --git a/chapter01/changelog.xml b/chapter01/changelog.xml index 7c4a15381..527571db1 100644 --- a/chapter01/changelog.xml +++ b/chapter01/changelog.xml @@ -40,6 +40,48 @@ appropriate for the 
entry or if needed the entire day's listitem. --> + + 2022-10-01 + + + [bdubbs] - Update to iana-etc-20220922. Addresses + #5006. + + + [bdubbs] - Update to tzdata-2022d. Fixes + #5119. + + + [bdubbs] - Update to readline-8.2. Fixes + #5121. + + + [bdubbs] - Update to linux-5.19.12. Fixes + #5115. + + + [bdubbs] - Update to libffi-3.4.3. Fixes + #5116. + + + [bdubbs] - Update to libcap-2.66. Fixes + #5120. + + + [bdubbs] - Update to dbus-1.14.2. Fixes + #5123. + + + [bdubbs] - Update to bc-6.0.4. Fixes + #5114. + + + [bdubbs] - Update to bash-5.2. Fixes + #5122. + + + + 2022-09-22 diff --git a/chapter01/whatsnew.xml b/chapter01/whatsnew.xml index 3c6df1428..ac7a4140c 100644 --- a/chapter01/whatsnew.xml +++ b/chapter01/whatsnew.xml @@ -11,6 +11,14 @@ What's new since the last release + In the 11.3 release, --enable-default-pie + and --enable-default-ssp are enabled for GCC. + They can mitigate some types of malicious attacks but they cannot provide + full protection. If you are reading a programming textbook, + you may need to disable PIE and SSP with the GCC options + -fno-pie -no-pie -fno-stack-protector + because some textbooks assume they are disabled by default. + Below is a list of package updates made since the previous release of the book. @@ -38,9 +46,9 @@ - + Bc &bc-version; @@ -62,9 +70,9 @@ - + @@ -122,9 +130,9 @@ - + @@ -149,15 +157,15 @@ - + - + @@ -218,9 +226,9 @@ Python-&python-version; - + @@ -245,9 +253,9 @@ - + diff --git a/chapter02/mounting.xml b/chapter02/mounting.xml index f222fb052..1c2e7205e 100644 --- a/chapter02/mounting.xml +++ b/chapter02/mounting.xml @@ -15,6 +15,11 @@ the file system is mounted at the directory specified by the LFS environment variable described in the previous section. + + Strictly speaking, one cannot "mount a partition". One mounts the file + system embedded in that partition. 
But since a single partition can't contain + more than one file system, people often speak of the partition and the + associated file system as if they were one and the same. Create the mount point and mount the LFS file system with these commands: diff --git a/chapter03/introduction.xml b/chapter03/introduction.xml index 1a69187a2..0e90508ae 100644 --- a/chapter03/introduction.xml +++ b/chapter03/introduction.xml @@ -104,4 +104,14 @@ popd This check can be used after retrieving the needed files with any of the methods listed above. + If the packages and patches are downloaded as a non-&root; user, + these files will be owned by the user. The file system records the + owner by its UID, and the UID of a normal user in the host distro is + not assigned in LFS. So the files will be left owned by an unnamed UID + in the final LFS system. If you won't assign the same UID for your user + in the LFS system, change the owners of these files to &root; now to + avoid this issue: + +chown root:root $LFS/sources/* + diff --git a/chapter04/aboutsbus.xml b/chapter04/aboutsbus.xml index 1928423ea..82934d851 100644 --- a/chapter04/aboutsbus.xml +++ b/chapter04/aboutsbus.xml @@ -13,25 +13,25 @@ Many people would like to know beforehand approximately how long it takes to compile and install each package. Because Linux From Scratch can be built on many different systems, it is impossible to - provide accurate time estimates. The biggest package (Glibc) will + provide absolute time estimates. The biggest package (Glibc) will take approximately 20 minutes on the fastest systems, but could take up to three days on slower systems! Instead of providing actual times, the Standard Build Unit (SBU) measure will be used instead. The SBU measure works as follows. The first package to be compiled - from this book is binutils in . The - time it takes to compile this package is what will be referred to as the - Standard Build Unit or SBU. 
All other compile times will be expressed relative - to this time. + is binutils in . The + time it takes to compile this package is what we will refer to as the + Standard Build Unit or SBU. All other compile times will be expressed in + terms of this unit of time. For example, consider a package whose compilation time is 4.5 - SBUs. This means that if a system took 10 minutes to compile and + SBUs. This means that if your system took 10 minutes to compile and install the first pass of binutils, it will take - approximately 45 minutes to build this example package. - Fortunately, most build times are shorter than the one for binutils. + approximately 45 minutes to build the example package. + Fortunately, most build times are shorter than one SBU. - In general, SBUs are not entirely accurate because they depend on many + SBUs are not entirely accurate because they depend on many factors, including the host system's version of GCC. They are provided here to give an estimate of how long it might take to install a package, but the numbers can vary by as much as dozens of minutes in some cases. @@ -45,15 +45,15 @@ export MAKEFLAGS='-j4' - or just building with: + or by building with: make -j4 When multiple processors are used in this way, the SBU units in the book will vary even more than they normally would. In some cases, the make step will simply fail. Analyzing the output of the build process will also - be more difficult because the lines of different processes will be - interleaved. If you run into a problem with a build step, revert back to a + be more difficult because the lines from different processes will be + interleaved. If you run into a problem with a build step, revert to a single processor build to properly analyze the error messages. 
diff --git a/chapter04/abouttestsuites.xml b/chapter04/abouttestsuites.xml index 9206f33be..459472be0 100644 --- a/chapter04/abouttestsuites.xml +++ b/chapter04/abouttestsuites.xml @@ -27,21 +27,21 @@ Running the test suites in and - is impossible, since the programs are compiled with a cross-compiler, - so are not supposed to be able to run on the build host. + is pointless; since the test programs are compiled with a cross-compiler, + they probably can't run on the build host. A common issue with running the test suites for binutils and GCC - is running out of pseudo terminals (PTYs). This can result in a high + is running out of pseudo terminals (PTYs). This can result in a large number of failing tests. This may happen for several reasons, but the most likely cause is that the host system does not have the devpts file system set up correctly. This issue is discussed in greater detail at . - Sometimes package test suites will fail, but for reasons which the + Sometimes package test suites will fail for reasons which the developers are aware of and have deemed non-critical. Consult the logs located at to verify whether or not these failures are - expected. This site is valid for all tests throughout this book. + expected. This site is valid for all test suites throughout this book. diff --git a/chapter04/addinguser.xml b/chapter04/addinguser.xml index 3620524c8..a7f12a1ea 100644 --- a/chapter04/addinguser.xml +++ b/chapter04/addinguser.xml @@ -14,9 +14,9 @@ making a single mistake can damage or destroy a system. Therefore, the packages in the next two chapters are built as an unprivileged user. You could use your own user name, but to make it easier to set up a clean - working environment, create a new user called lfs as a member of a new group (also named - lfs) and use this user during + lfs) and run commands as &lfs-user; during the installation process. 
As root, issue the following commands to add the new user: @@ -24,7 +24,7 @@ useradd -s /bin/bash -g lfs -m -k /dev/null lfs - The meaning of the command line options: + This is what the command line options mean: -s /bin/bash @@ -54,7 +54,7 @@ useradd -s /bin/bash -g lfs -m -k /dev/null lfs -k /dev/null This parameter prevents possible copying of files from a skeleton - directory (default is /etc/skel) + directory (the default is /etc/skel) by changing the input location to the special null device. @@ -68,34 +68,34 @@ useradd -s /bin/bash -g lfs -m -k /dev/null lfs - To log in as lfs (as opposed - to switching to user lfs when logged - in as root, which does not require - the lfs user to have a password), - give lfs a password: + If you want to log in as &lfs-user; or switch to &lfs-user; from a + non-&root; user (as opposed to switching to user &lfs-user; + when logged in as &root;, which does not require the &lfs-user; user to + have a password), you need to set a password of &lfs-user;. Issue the + following command as the &root; user to set the password: passwd lfs Grant lfs full access to - all directories under $LFS by making - lfs the directory owner: + all the directories under $LFS by making + lfs the owner: chown -v lfs $LFS/{usr{,/*},lib,var,etc,bin,sbin,tools} - In some host systems, the following command does not complete - properly and suspends the login to the &lfs-user; user to the background. +In some host systems, the following su command does not complete + properly and suspends the login for the &lfs-user; user to the background. If the prompt "lfs:~$" does not appear immediately, entering the fg command will fix the issue. - Next, login as user lfs. - This can be done via a virtual console, through a display manager, or with - the following substitute/switch user command: + Next, start a shell running as user &lfs-user;. 
This can be done by + logging in as &lfs-user; on a virtual console, or with the following + substitute/switch user command: su - lfs The - instructs su to start a login shell as opposed to a non-login shell. - The difference between these two types of shells can be found in detail in + The difference between these two types of shells is described in detail in bash(1) and info bash. diff --git a/chapter04/creatingminlayout.xml b/chapter04/creatingminlayout.xml index 3840138ff..59d3ee908 100644 --- a/chapter04/creatingminlayout.xml +++ b/chapter04/creatingminlayout.xml @@ -10,8 +10,9 @@ Creating a limited directory layout in LFS filesystem - The next task to be performed in the LFS partition is to create a limited - directory hierarchy, so that the programs compiled in In this section, we begin populating the LFS filesystem with the + pieces that will constitute the final Linux system. The first step is to + create a limited directory hierarchy, so that the programs compiled in (as well as glibc and libstdc++ in ) can be installed in their final location. We do this so those temporary programs will be overwritten when diff --git a/chapter04/settingenviron.xml b/chapter04/settingenviron.xml index bac551e19..a57246217 100644 --- a/chapter04/settingenviron.xml +++ b/chapter04/settingenviron.xml @@ -19,8 +19,10 @@ exec env -i HOME=$HOME TERM=$TERM PS1='\u:\w\$ ' /bin/bash EOF - When logged on as user lfs, - the initial shell is usually a login shell which reads + When logged on as user lfs + or switched to the &lfs-user; user using a su command + with - option, + the initial shell is a login shell which reads the /etc/profile of the host (probably containing some settings and environment variables) and then .bash_profile. The exec env -i.../bin/bash command in the @@ -32,7 +34,7 @@ EOF ensuring a clean environment. 
The new instance of the shell is a non-login - shell, which does not read, and execute, the contents of /etc/profile or + shell, which does not read, and execute, the contents of the /etc/profile or .bash_profile files, but rather reads, and executes, the .bashrc file instead. Create the .bashrc file now: @@ -59,10 +61,10 @@ EOF The set +h command turns off bash's hash function. Hashing is ordinarily a useful feature—bash uses a hash table to remember the - full path of executable files to avoid searching the PATH + full path to executable files to avoid searching the PATH time and again to find the same executable. However, the new tools should - be used as soon as they are installed. By switching off the hash function, - the shell will always search the PATH when a program is to + be used as soon as they are installed. Switching off the hash function forces + the shell to search the PATH whenever a program is to be run. As such, the shell will find the newly compiled tools in $LFS/tools/bin as soon as they are available without remembering a previous version of the same program @@ -115,10 +117,10 @@ EOF PATH=/usr/bin - Many modern linux distributions have merged Many modern Linux distributions have merged /bin and /usr/bin. When this is the case, the standard - PATH variable needs just to be set to PATH variable should be set to /usr/bin/ for the environment. When this is not the case, the following line adds /bin @@ -141,7 +143,7 @@ EOF standard PATH, the cross-compiler installed at the beginning of is picked up by the shell immediately after its installation. This, combined with turning off hashing, - limits the risk that the compiler from the host be used instead of the + limits the risk that the compiler from the host is used instead of the cross-compiler. 
@@ -195,7 +197,8 @@ EOF Finally, to have the environment fully prepared for building the - temporary tools, source the just-created user profile: + temporary tools, force the bash shell to read + the new user profile: source ~/.bash_profile diff --git a/chapter07/creatingdirs.xml b/chapter07/creatingdirs.xml index bc6162e61..35a0a9003 100644 --- a/chapter07/creatingdirs.xml +++ b/chapter07/creatingdirs.xml @@ -10,10 +10,10 @@ Creating Directories - It is time to create the full structure in the LFS file system. + It is time to create the full directory structure in the LFS file system. - Some of the directories mentioned in this section may be - already created earlier with explicit instructions or when installing some + Some of the directories mentioned in this section may have + already been created earlier with explicit instructions, or when installing some packages. They are repeated below for completeness. Create some root-level directories that are not in the limited set @@ -42,14 +42,14 @@ install -dv -m 0750 /root install -dv -m 1777 /tmp /var/tmp Directories are, by default, created with permission mode 755, but - this is not desirable for all directories. In the commands above, two + this is not desirable everywhere. In the commands above, two changes are made—one to the home directory of user root, and another to the directories for temporary files. The first mode change ensures that not just anybody can enter - the /root directory—the - same as a normal user would do with his or her home directory. The + the /root directory—just + like a normal user would do with his or her own home directory. The second mode change makes sure that any user can write to the /tmp and /var/tmp directories, but cannot remove @@ -59,14 +59,14 @@ install -dv -m 1777 /tmp /var/tmp FHS Compliance Note - The directory tree is based on the Filesystem Hierarchy Standard + This directory tree is based on the Filesystem Hierarchy Standard (FHS) (available at ). 
The FHS also specifies - the optional existence of some directories such as /usr/local/games and /usr/share/games. We create only the - directories that are needed. However, feel free to create these - directories. + class="directory">/usr/share/games. In LFS, we create only the + directories that are really necessary. However, feel free to create more + directories, if you wish. diff --git a/chapter07/introduction.xml b/chapter07/introduction.xml index 6605ddea4..d00f3fe2a 100644 --- a/chapter07/introduction.xml +++ b/chapter07/introduction.xml @@ -11,22 +11,22 @@ Introduction This chapter shows how to build the last missing bits of the temporary - system: the tools needed by the build machinery of various packages. Now + system: the tools needed to build the various packages. Now that all circular dependencies have been resolved, a chroot environment, completely isolated from the host operating system (except for the running kernel), can be used for the build. For proper operation of the isolated environment, some communication - with the running kernel must be established. This is done through the - so-called Virtual Kernel File Systems, which must be - mounted when entering the chroot environment. You may want to check - that they are mounted by issuing findmnt. + with the running kernel must be established. This is done via the + so-called Virtual Kernel File Systems, which will be + mounted before entering the chroot environment. You may want to verify + that they are mounted by issuing the findmnt command. Until , the commands must be run as root, with the LFS variable set. After entering chroot, all commands are run as &root;, fortunately without access to the OS of the computer you built LFS on. Be careful anyway, as it is easy to destroy the whole - LFS system with badly formed commands. + LFS system with bad commands. 
diff --git a/chapter07/kernfs.xml b/chapter07/kernfs.xml index c9721113d..3e96bee5e 100644 --- a/chapter07/kernfs.xml +++ b/chapter07/kernfs.xml @@ -14,12 +14,14 @@ /dev/* - Various file systems exported by the kernel are used to communicate to - and from the kernel itself. These file systems are virtual in that no disk + Applications running in user space utilize various file + systems exported by the kernel to communicate + with the kernel itself. These file systems are virtual: no disk space is used for them. The content of the file systems resides in - memory. + memory. These file systems must be mounted in the $LFS directory tree + so the applications can find them in the chroot environment. - Begin by creating directories onto which the file systems will be + Begin by creating directories on which the file systems will be mounted: mkdir -pv $LFS/{dev,proc,sys,run} @@ -27,20 +29,31 @@ Mounting and Populating /dev - During a normal boot, the kernel automatically mounts the - devtmpfs filesystem on the - /dev directory, and allow the - devices to be created dynamically on that virtual filesystem as they - are detected or accessed. Device creation is generally done during the - boot process by the kernel and Udev. - Since this new system does not yet have Udev and - has not yet been booted, it is necessary to mount and populate - /dev manually. This is - accomplished by bind mounting the host system's + During a normal boot of the LFS system, the kernel automatically + mounts the devtmpfs + filesystem on the + /dev directory; the kernel + creates device nodes on that virtual filesystem during the boot process + or when a device is first detected or accessed. The udev daemon may + change the owner or permission of the device nodes created by the + kernel, or create new device nodes or symlinks to ease the work of + distro maintainers or system administrators. (See + for details.) 
+ If the host kernel supports &devtmpfs;, we can simply mount a + &devtmpfs; at $LFS/dev and rely + on the kernel to populate it (the LFS building process does not need + the additional work onto &devtmpfs; by udev daemon). + + But, some host kernels may lack &devtmpfs; support and these + host distros maintain the content of + /dev with different methods. + So the only host-agnostic way for populating + $LFS/dev is + bind mounting the host system's /dev directory. A bind mount is a special type of mount that allows you to create a mirror of a - directory or mount point to some other location. Use the following - command to achieve this: + directory or mount point at some other location. Use the following + command to do this: mount -v --bind /dev $LFS/dev @@ -89,10 +102,10 @@ mount -vt tmpfs tmpfs $LFS/run The /run tmpfs was mounted above so in this case only a directory needs to be created. - In other cases /dev/shm is a mountpoint + In other host systems /dev/shm is a mount point for a tmpfs. In that case the mount of /dev above will only create - /dev/shm in the chroot environment as a directory. In this situation - we explicitly mount a tmpfs, + /dev/shm as a directory in the chroot environment. 
In this situation + we must explicitly mount a tmpfs: if [ -h $LFS/dev/shm ]; then mkdir -pv $LFS/$(readlink $LFS/dev/shm) diff --git a/chapter08/autoconf.xml b/chapter08/autoconf.xml index d7563963b..7925e44ea 100644 --- a/chapter08/autoconf.xml +++ b/chapter08/autoconf.xml @@ -40,12 +40,13 @@ Installation of Autoconf - + First, fix several problems with the tests caused by bash-5.2 and later: + + sed -e 's/SECONDS|/&SHLVL|/' \ + -e '/BASH_ARGV=/a\ /^SHLVL=/ d' \ + -i.orig tests/local.at + Prepare Autoconf for compilation: ./configure --prefix=/usr diff --git a/chapter08/pkgmgt.xml b/chapter08/pkgmgt.xml index b925a976f..60c472ad4 100644 --- a/chapter08/pkgmgt.xml +++ b/chapter08/pkgmgt.xml @@ -11,13 +11,13 @@ Package Management Package Management is an often requested addition to the LFS Book. A - Package Manager allows tracking the installation of files making it easy to + Package Manager tracks the installation of files, making it easier to remove and upgrade packages. As well as the binary and library files, a package manager will handle the installation of configuration files. Before you begin to wonder, NO—this section will not talk about nor recommend any particular package manager. What it provides is a roundup of the more popular techniques and how they work. The perfect package manager for you may - be among these techniques or may be a combination of two or more of these + be among these techniques, or it may be a combination of two or more of these techniques. This section briefly mentions issues that may arise when upgrading packages. @@ -32,14 +32,14 @@ There are multiple solutions for package management, each having - its strengths and drawbacks. Including one that satisfies all audiences + its strengths and drawbacks. Finding one solution that satisfies all audiences is difficult. There are some hints written on the topic of package management. Visit the Hints Project and see if one of them - fits your need. + fits your needs. 
Upgrade Issues @@ -51,18 +51,18 @@ - If Linux kernel needs to be upgraded (for example, from - 5.10.17 to 5.10.18 or 5.11.1), nothing else need to be rebuilt. - The system will keep working fine thanks to the well-defined border - between kernel and userspace. Specifically, Linux API headers - need not to be (and should not be, see the next item) upgraded - alongside the kernel. You'll need to reboot your system to use the + If the Linux kernel needs to be upgraded (for example, from + 5.10.17 to 5.10.18 or 5.11.1), nothing else needs to be rebuilt. + The system will keep working fine thanks to the well-defined interface + between the kernel and user space. Specifically, Linux API headers + need not be (and should not be, see the next item) upgraded + along with the kernel. You will merely need to reboot your system to use the upgraded kernel. - If Linux API headers or Glibc needs to be upgraded to a newer - version, (e.g. from glibc-2.31 to glibc-2.32), it is safer to + If Linux API headers or glibc need to be upgraded to a newer + version, (e.g., from glibc-2.31 to glibc-2.32), it is safer to rebuild LFS. Though you may be able to rebuild all the packages in their dependency order, we do not recommend it. @@ -70,44 +70,44 @@ If a package containing a shared library is updated, and if the name of the library changes, then any packages dynamically - linked to the library need to be recompiled in order to link against the + linked to the library must be recompiled, to link against the newer library. (Note that there is no correlation between the package version and the name of the library.) For example, consider a package - foo-1.2.3 that installs a shared library with name libfoo.so.1. If you upgrade the package to - a newer version foo-1.2.4 that installs a shared library with name + foo-1.2.3 that installs a shared library with the name libfoo.so.1. 
Suppose you upgrade the package to + a newer version foo-1.2.4 that installs a shared library with the name libfoo.so.2. In this case, any packages that are dynamically linked to libfoo.so.1 need to be recompiled to link against libfoo.so.2 in order to - use the new library version. You should not remove the previous - libraries unless all the dependent packages are recompiled. + use the new library version. You should not remove the old + libraries until all the dependent packages have been recompiled. If a package containing a shared library is updated, - and the name of library doesn't change, but the version number of the + and the name of the library doesn't change, but the version number of the library file decreases (for example, - the name of the library is kept named + the library is still named libfoo.so.1, - but the name of library file is changed from + but the name of the library file is changed from libfoo.so.1.25 to libfoo.so.1.24), you should remove the library file from the previously installed version - (libfoo.so.1.25 in the case). - Or, a ldconfig run (by yourself using a command + (libfoo.so.1.25 in this case). + Otherwise, a ldconfig command (invoked by yourself from the command line, or by the installation of some package) will reset the symlink libfoo.so.1 to point to - the old library file because it seems having a newer - version, as its version number is larger. This situation may happen if - you have to downgrade a package, or the package changes the versioning - scheme of library files suddenly. + the old library file because it seems to be a newer + version; its version number is larger. This situation may arise if + you have to downgrade a package, or if the authors change the versioning + scheme for library files. 
If a package containing a shared library is updated, - and the name of library doesn't change, but a severe issue + and the name of the library doesn't change, but a severe issue (especially, a security vulnerability) is fixed, all running programs linked to the shared library should be restarted. The following command, run as root after - updating, will list what is using the old versions of those libraries + the update is complete, will list which processes are using the old versions of those libraries (replace libfoo with the name of the library): @@ -115,33 +115,33 @@ tr -cd 0-9\\n | xargs -r ps u - If OpenSSH is being used for accessing - the system and it is linked to the updated library, you need to - restart sshd service, then logout, login again, - and rerun that command to confirm nothing is still using the + If OpenSSH is being used to access + the system and it is linked to the updated library, you must + restart the sshd service, then logout, login again, + and rerun the preceding ps command to confirm that nothing is still using the deleted libraries. If the systemd daemon (running as PID 1) is - linked to the updated library, you can restart it without reboot + linked to the updated library, you can restart it without rebooting by running systemctl daemon-reexec as the root user. - If a binary or a shared library is overwritten, the processes - using the code or data in the binary or library may crash. The - correct way to update a binary or a shared library without causing + If an executable program or a shared library is overwritten, the processes + using the code or data in that program or library may crash. The + correct way to update a program or a shared library without causing the process to crash is to remove it first, then install the new - version into position. The install command - provided by Coreutils has already - implemented this and most packages use it to install binaries and + version. 
The install command + provided by coreutils has already + implemented this, and most packages use that command to install binary files and libraries. This means that you won't be troubled by this issue most of the time. However, the install process of some packages (notably Mozilla JS - in BLFS) just overwrites the file if it exists and causes a crash, so + in BLFS) just overwrites the file if it exists; this causes a crash. So it's safer to save your work and close unneeded running processes - before updating a package. + before updating a package. @@ -152,36 +152,36 @@ The following are some common package management techniques. Before making a decision on a package manager, do some research on the various - techniques, particularly the drawbacks of the particular scheme. + techniques, particularly the drawbacks of each particular scheme. It is All in My Head! - Yes, this is a package management technique. Some folks do not find - the need for a package manager because they know the packages intimately - and know what files are installed by each package. Some users also do not + Yes, this is a package management technique. Some folks do not + need a package manager because they know the packages intimately + and know which files are installed by each package. Some users also do not need any package management because they plan on rebuilding the entire - system when a package is changed. + system whenever a package is changed. Install in Separate Directories - This is a simplistic package management that does not need any extra - package to manage the installations. Each package is installed in a + This is a simplistic package management technique that does not need a + special program to manage the packages. Each package is installed in a separate directory. For example, package foo-1.1 is installed in /usr/pkg/foo-1.1 and a symlink is made from /usr/pkg/foo to - /usr/pkg/foo-1.1. When installing - a new version foo-1.2, it is installed in + /usr/pkg/foo-1.1. 
When + a new version foo-1.2 comes along, it is installed in /usr/pkg/foo-1.2 and the previous symlink is replaced by a symlink to the new version. Environment variables such as PATH, LD_LIBRARY_PATH, MANPATH, INFOPATH and CPPFLAGS need to be expanded to - include /usr/pkg/foo. For more than a few packages, + include /usr/pkg/foo. If you install more than a few packages, this scheme becomes unmanageable. @@ -190,15 +190,15 @@ Symlink Style Package Management This is a variation of the previous package management technique. - Each package is installed similar to the previous scheme. But instead of - making the symlink, each file is symlinked into the + Each package is installed as in the previous scheme. But instead of + making the symlink via a generic package name, each file is symlinked into the /usr hierarchy. This removes the need to expand the environment variables. Though the symlinks can be - created by the user to automate the creation, many package managers have - been written using this approach. A few of the popular ones include Stow, + created by the user, many package managers use this approach, and + automate the creation of the symlinks. A few of the popular ones include Stow, Epkg, Graft, and Depot. - The installation needs to be faked, so that the package thinks that + The installation script needs to be fooled, so the package thinks it is installed in /usr though in reality it is installed in the /usr/pkg hierarchy. Installing in @@ -216,7 +216,7 @@ make install /usr/pkg/libfoo/1.1/lib/libfoo.so.1 instead of /usr/lib/libfoo.so.1 as you would expect. The correct approach is to use the - DESTDIR strategy to fake installation of the package. This + DESTDIR variable to direct the installation. This approach works as follows: ./configure --prefix=/usr @@ -224,8 +224,8 @@ make make DESTDIR=/usr/pkg/libfoo/1.1 install Most packages support this approach, but there are some which do not. 
- For the non-compliant packages, you may either need to manually install the - package, or you may find that it is easier to install some problematic + For the non-compliant packages, you may either need to install the + package manually, or you may find that it is easier to install some problematic packages into /opt. @@ -237,14 +237,14 @@ make DESTDIR=/usr/pkg/libfoo/1.1 install the package. After the installation, a simple use of the find command with the appropriate options can generate a log of all the files installed after the timestamp file was created. A - package manager written with this approach is install-log. + package manager that uses this approach is install-log. Though this scheme has the advantage of being simple, it has two drawbacks. If, during installation, the files are installed with any timestamp other than the current time, those files will not be tracked by - the package manager. Also, this scheme can only be used when one package - is installed at a time. The logs are not reliable if two packages are - being installed on two different consoles. + the package manager. Also, this scheme can only be used when packages + are installed one at a time. The logs are not reliable if two packages are + installed simultaneously from two different consoles. @@ -262,12 +262,12 @@ make DESTDIR=/usr/pkg/libfoo/1.1 install calls that modify the filesystem. For this approach to work, all the executables need to be dynamically linked without the suid or sgid bit. Preloading the library may cause some unwanted side-effects during - installation. Therefore, it is advised that one performs some tests to - ensure that the package manager does not break anything and logs all the + installation. Therefore, it's a good idea to perform some tests to + ensure that the package manager does not break anything, and that it logs all the appropriate files. 
- The second technique is to use strace, which - logs all system calls made during the execution of the installation + Another technique is to use strace, which + logs all the system calls made during the execution of the installation scripts. @@ -275,10 +275,10 @@ make DESTDIR=/usr/pkg/libfoo/1.1 install Creating Package Archives In this scheme, the package installation is faked into a separate - tree as described in the Symlink style package management. After the + tree as previously described in the symlink style package management section. After the installation, a package archive is created using the installed files. - This archive is then used to install the package either on the local - machine or can even be used to install the package on other machines. + This archive is then used to install the package on the local + machine or even on other machines. This approach is used by most of the package managers found in the commercial distributions. Examples of package managers that follow this @@ -289,10 +289,10 @@ make DESTDIR=/usr/pkg/libfoo/1.1 install package management for LFS systems is located at . - Creation of package files that include dependency information is - complex and is beyond the scope of LFS. + The creation of package files that include dependency information is + complex, and beyond the scope of LFS. - Slackware uses a tar based system for package + Slackware uses a tar-based system for package archives. This system purposely does not handle package dependencies as more complex package managers do. For details of Slackware package management, see another computer with the same architecture as the base system is as simple as using tar on the LFS partition that contains the root directory (about 250MB uncompressed for a base LFS build), copying - that file via network transfer or CD-ROM to the new system and expanding - it. From that point, a few configuration files will have to be changed. 
+ that file via network transfer or CD-ROM / USB stick to the new system, and expanding + it. After that, a few configuration files will have to be changed. Configuration files that may need to be updated include: /etc/hosts, /etc/fstab, @@ -342,17 +342,17 @@ make DESTDIR=/usr/pkg/libfoo/1.1 install - A custom kernel may need to be built for the new system depending on + A custom kernel may be needed for the new system, depending on differences in system hardware and the original kernel configuration. There have been some reports of issues when copying between similar but not identical architectures. For instance, the instruction set - for an Intel system is not identical with an AMD processor and later - versions of some processors may have instructions that are unavailable in + for an Intel system is not identical with the AMD processor's instructions, and later + versions of some processors may provide instructions that are unavailable with earlier versions. - Finally the new system has to be made bootable via Finally, the new system has to be made bootable via . 
diff --git a/chapter09/udev.xml b/chapter09/udev.xml index 435255a19..396f2b389 100644 --- a/chapter09/udev.xml +++ b/chapter09/udev.xml @@ -93,7 +93,7 @@ - + Device Node Creation Device files are created by the kernel by the Then unmount the virtual file systems: umount -v $LFS/dev/pts +mountpoint -q $LFS/dev/shm && umount $LFS/dev/shm umount -v $LFS/dev umount -v $LFS/run umount -v $LFS/proc diff --git a/general.ent b/general.ent index f5e413cd8..e11a6da87 100644 --- a/general.ent +++ b/general.ent @@ -121,8 +121,12 @@ root"> lfs"> +devtmpfs"> /etc/fstab"> /boot"> +"> +"> +"> %packages-entities; diff --git a/packages.ent b/packages.ent index e4fe92680..54b57537f 100644 --- a/packages.ent +++ b/packages.ent @@ -48,20 +48,20 @@ - - + + - + - + - + @@ -114,10 +114,10 @@ - + - + @@ -317,10 +317,10 @@ - + - + @@ -390,18 +390,18 @@ - - + + - + - - + + - + @@ -424,12 +424,12 @@ - + - + - + - + + + - + @@ -694,10 +694,10 @@ - - + + - + diff --git a/part3intro/generalinstructions.xml b/part3intro/generalinstructions.xml index ac2bcc675..f3285bc17 100644 --- a/part3intro/generalinstructions.xml +++ b/part3intro/generalinstructions.xml @@ -11,29 +11,29 @@ General Compilation Instructions - When building packages there are several assumptions made within - the instructions: + Here are some things you should know about building each package: - Several of the packages are patched before compilation, but only when + Several packages are patched before compilation, but only when the patch is needed to circumvent a problem. A patch is often needed in - both this and the following chapters, but sometimes in only one location. + both the current and the following chapters, but sometimes, when the same package + is built more than once, the patch is not needed right away. Therefore, do not be concerned if instructions for a downloaded patch seem to be missing. Warning messages about offset or fuzz may also be encountered when applying a patch. 
Do - not worry about these warnings, as the patch was still successfully + not worry about these warnings; the patch was still successfully applied. - During the compilation of most packages, there will be several - warnings that scroll by on the screen. These are normal and can safely be - ignored. These warnings are as they appear—warnings about + During the compilation of most packages, some + warnings will scroll by on the screen. These are normal and can safely be + ignored. These warnings are usually about deprecated, but not invalid, use of the C or C++ syntax. C standards change - fairly often, and some packages still use the older standard. This is not a - problem, but does prompt the warning. + fairly often, and some packages have not yet been updated. This is not a + serious problem, but it does cause the warnings to appear. @@ -69,25 +69,25 @@ symbolic link to gawk. /usr/bin/yacc is a - symbolic link to bison or a small script that + symbolic link to bison, or to a small script that executes bison. - To re-emphasize the build process: + Here is a synopsis of the build process. Place all the sources and patches in a directory that will be - accessible from the chroot environment such as + accessible from the chroot environment, such as /mnt/lfs/sources/. - Change to the sources directory. + Change to the /mnt/lfs/sources/ directory. For each package: @@ -97,22 +97,21 @@ to be built. In and , ensure you are the lfs user when extracting the package. - All methods to get the source code tree being built - in-position, except extracting the package tarball, are not - supported. Notably, using cp -R to copy the + Do not use any method except the tar command + to extract the source code. Notably, using the cp -R + command to copy the source code tree somewhere else can destroy links and - timestamps in the sources tree and cause building - failure. + timestamps in the source tree, and cause the build to fail. 
Change to the directory created when the package was extracted. - Follow the book's instructions for building the package. + Follow the instructions for building the package. - Change back to the sources directory. + Change back to the sources directory when the build is complete. Delete the extracted source directory unless instructed otherwise. diff --git a/part3intro/introduction.xml b/part3intro/introduction.xml index 6d30ffe49..03ac66a87 100644 --- a/part3intro/introduction.xml +++ b/part3intro/introduction.xml @@ -10,25 +10,25 @@ Introduction - This part is divided into three stages: first building a cross - compiler and its associated libraries; second, use this cross toolchain + This part is divided into three stages: first, building a cross + compiler and its associated libraries; second, using this cross toolchain to build several utilities in a way that isolates them from the host - distribution; third, enter the chroot environment, which further improves - host isolation, and build the remaining tools needed to build the final + distribution; and third, entering the chroot environment (which further improves + host isolation) and constructing the remaining tools needed to build the final system. - With this part begins the real work of building a new - system. It requires much care in ensuring that the instructions are - followed exactly as the book shows them. You should try to understand - what they do, and whatever your eagerness to finish your build, you should - refrain from blindly type them as shown, but rather read documentation when + This is where the real work of building a new system + begins. Be very careful to follow the instructions exactly as the book + shows them. You should try to understand what each command does, + and no matter how eager you are to finish your build, you should + refrain from blindly typing the commands as shown. Read the documentation when there is something you do not understand. 
Also, keep track of your typing - and of the output of commands, by sending them to a file, using the - tee utility. This allows for better diagnosing - if something gets wrong. + and of the output of commands, by using the tee utility + to send the terminal output to a file. This makes debugging easier + if something goes wrong. - The next section gives a technical introduction to the build process, - while the following one contains very + The next section is a technical introduction to the build process, + while the following one presents very important general instructions. diff --git a/part3intro/toolchaintechnotes.xml b/part3intro/toolchaintechnotes.xml index 93f27f267..c5b8dc577 100644 --- a/part3intro/toolchaintechnotes.xml +++ b/part3intro/toolchaintechnotes.xml @@ -11,26 +11,26 @@ Toolchain Technical Notes This section explains some of the rationale and technical details - behind the overall build method. It is not essential to immediately + behind the overall build method. Don't try to immediately understand everything in this section. Most of this information will be - clearer after performing an actual build. This section can be referred - to at any time during the process. + clearer after performing an actual build. Come back and re-read this chapter + at any time during the build process. The overall goal of and is to produce a temporary area that - contains a known-good set of tools that can be isolated from the host system. - By using chroot, the commands in the remaining chapters - will be contained within that environment, ensuring a clean, trouble-free + linkend="chapter-temporary-tools"/> is to produce a temporary area + containing a set of tools that are known to be good, and that are isolated from the host system. + By using the chroot command, the compilations in the remaining chapters + will be isolated within that environment, ensuring a clean, trouble-free build of the target LFS system. 
The build process has been designed to - minimize the risks for new readers and to provide the most educational value + minimize the risks for new readers, and to provide the most educational value at the same time. - The build process is based on the process of + This build process is based on cross-compilation. Cross-compilation is normally used - for building a compiler and its toolchain for a machine different from - the one that is used for the build. This is not strictly needed for LFS, + to build a compiler and its associated toolchain for a machine different from + the one that is used for the build. This is not strictly necessary for LFS, since the machine where the new system will run is the same as the one - used for the build. But cross-compilation has the great advantage that + used for the build. But cross-compilation has one great advantage: anything that is cross-compiled cannot depend on the host environment. @@ -39,47 +39,46 @@ - The LFS book is not, and does not contain a general tutorial to - build a cross (or native) toolchain. Don't use the command in the - book for a cross toolchain which will be used for some purpose other + The LFS book is not (and does not contain) a general tutorial to + build a cross (or native) toolchain. Don't use the commands in the + book for a cross toolchain for some purpose other than building LFS, unless you really understand what you are doing. - Cross-compilation involves some concepts that deserve a section on - their own. Although this section may be omitted in a first reading, - coming back to it later will be beneficial to your full understanding of + Cross-compilation involves some concepts that deserve a section of + their own. Although this section may be omitted on a first reading, + coming back to it later will help you gain a fuller understanding of the process. - Let us first define some terms used in this context: + Let us first define some terms used in this context. 
- build + The build is the machine where we build programs. Note that this machine - is referred to as the host in other - sections. + is also referred to as the host. - host + The host is the machine/system where the built programs will run. Note that this use of host is not the same as in other sections. - target + The target is only used for compilers. It is the machine the compiler - produces code for. It may be different from both build and - host. + produces code for. It may be different from both the build and + the host. As an example, let us imagine the following scenario (sometimes - referred to as Canadian Cross): we may have a + referred to as Canadian Cross): we have a compiler on a slow machine only, let's call it machine A, and the compiler - ccA. We may have also a fast machine (B), but with no compiler, and we may - want to produce code for another slow machine (C). To build a - compiler for machine C, we would have three stages: + ccA. We also have a fast machine (B), but no compiler for (B), and we + want to produce code for a third, slow machine (C). We will build a + compiler for machine C in three stages. @@ -95,24 +94,24 @@ 1AAB - build cross-compiler cc1 using ccA on machine A + Build cross-compiler cc1 using ccA on machine A. 2ABC - build cross-compiler cc2 using cc1 on machine A + Build cross-compiler cc2 using cc1 on machine A. 3BCC - build compiler ccC using cc2 on machine B + Build compiler ccC using cc2 on machine B. - Then, all the other programs needed by machine C can be compiled + Then, all the programs needed by machine C can be compiled using cc2 on the fast machine B. Note that unless B can run programs - produced for C, there is no way to test the built programs until machine - C itself is running. For example, for testing ccC, we may want to add a + produced for C, there is no way to test the newly built programs until machine + C itself is running. 
For example, to run a test suite on ccC, we may want to add a fourth stage: @@ -129,7 +128,7 @@ 4CCC - rebuild and test ccC using itself on machine C + Rebuild and test ccC using ccC on machine C. @@ -146,44 +145,62 @@ Implementation of Cross-Compilation for LFS - Almost all the build systems use names of the form - cpu-vendor-kernel-os referred to as the machine triplet. An astute - reader may wonder why a triplet refers to a four component - name. The reason is history: initially, three component names were enough - to designate a machine unambiguously, but with new machines and systems - appearing, that proved insufficient. The word triplet - remained. A simple way to determine your machine triplet is to run - the config.guess + All packages involved with cross compilation in the book use an + autoconf-based building system. The autoconf-based building system + accepts system types in the form cpu-vendor-kernel-os, + referred to as the system triplet. Since the vendor field is mostly + irrelevant, autoconf allows it to be omitted. An astute reader may wonder + why a triplet refers to a four component name. The + reason is that the kernel field and the os field originated from one + system field. Such a three-field form is still valid + today for some systems, for example + x86_64-unknown-freebsd. But for other systems, + two systems can share the same kernel but still be too different to + use the same triplet for them. For example, an Android running on a + mobile phone is completely different from Ubuntu running on an ARM64 + server, even though they are running on the same type of CPU (ARM64) and + using the same kernel (Linux). + Without an emulation layer, you cannot run an + executable for the server on the mobile phone or vice versa. So the + system field is separated into kernel and os fields to + designate these systems unambiguously.
For our example, the Android + system is designated aarch64-unknown-linux-android, + and the Ubuntu system is designated + aarch64-unknown-linux-gnu. The word + triplet remained. A simple way to determine your + system triplet is to run the config.guess script that comes with the source for many packages. Unpack the binutils sources and run the script: ./config.guess and note the output. For example, for a 32-bit Intel processor the output will be i686-pc-linux-gnu. On a 64-bit - system it will be x86_64-pc-linux-gnu. + system it will be x86_64-pc-linux-gnu. On most + Linux systems the even simpler gcc -dumpmachine command + will give you similar information. - Also be aware of the name of the platform's dynamic linker, often + You should also be aware of the name of the platform's dynamic linker, often referred to as the dynamic loader (not to be confused with the standard linker ld that is part of binutils). The dynamic linker - provided by Glibc finds and loads the shared libraries needed by a + provided by package glibc finds and loads the shared libraries needed by a program, prepares the program to run, and then runs it. The name of the dynamic linker for a 32-bit Intel machine is ld-linux.so.2 and is ld-linux-x86-64.so.2 for 64-bit systems. A + class="libraryfile">ld-linux.so.2; it's ld-linux-x86-64.so.2 on 64-bit systems. A sure-fire way to determine the name of the dynamic linker is to inspect a random binary from the host system by running: readelf -l <name of binary> | grep interpreter and noting the output. The authoritative reference covering all platforms is in the - shlib-versions file in the root of the Glibc source + shlib-versions file in the root of the glibc source tree. In order to fake a cross compilation in LFS, the name of the host triplet is slightly adjusted by changing the "vendor" field in the - LFS_TGT variable. We also use the + LFS_TGT variable so it says "lfs". 
We also use the --with-sysroot option when building the cross linker and cross compiler to tell them where to find the needed host files. This ensures that none of the other programs built in can link to libraries on the build - machine. Only two stages are mandatory, and one more for tests: + machine. Only two stages are mandatory, plus one more for tests. @@ -199,47 +216,63 @@ 1pcpclfs - build cross-compiler cc1 using cc-pc on pc + Build cross-compiler cc1 using cc-pc on pc. 2pclfslfs - build compiler cc-lfs using cc1 on pc + Build compiler cc-lfs using cc1 on pc. 3lfslfslfs - rebuild and test cc-lfs using itself on lfs + Rebuild and test cc-lfs using cc-lfs on lfs. - In the above table, on pc means the commands are run + In the preceding table, on pc means the commands are run on a machine using the already installed distribution. On lfs means the commands are run in a chrooted environment. Now, there is more about cross-compiling: the C language is not just a compiler, but also defines a standard library. In this book, the - GNU C library, named glibc, is used. This library must - be compiled for the lfs machine, that is, using the cross compiler cc1. + GNU C library, named glibc, is used (there is an alternative, "musl"). This library must + be compiled for the LFS machine; that is, using the cross compiler cc1. But the compiler itself uses an internal library implementing complex - instructions not available in the assembler instruction set. This - internal library is named libgcc, and must be linked to the glibc + subroutines for functions not available in the assembler instruction set. This + internal library is named libgcc, and it must be linked to the glibc library to be fully functional! Furthermore, the standard library for - C++ (libstdc++) also needs being linked to glibc. 
The solution to this - chicken and egg problem is to first build a degraded cc1 based libgcc, - lacking some functionalities such as threads and exception handling, then - build glibc using this degraded compiler (glibc itself is not - degraded), then build libstdc++. But this last library will lack the - same functionalities as libgcc. + C++ (libstdc++) must also be linked with glibc. The solution to this + chicken and egg problem is first to build a degraded cc1-based libgcc, + lacking some functionalities such as threads and exception handling, and then + to build glibc using this degraded compiler (glibc itself is not + degraded), and also to build libstdc++. This last library will lack some of the + functionality of libgcc. - This is not the end of the story: the conclusion of the preceding + This is not the end of the story: the upshot of the preceding paragraph is that cc1 is unable to build a fully functional libstdc++, but this is the only compiler available for building the C/C++ libraries during stage 2! Of course, the compiler built during stage 2, cc-lfs, would be able to build those libraries, but (1) the build system of - GCC does not know that it is usable on pc, and (2) using it on pc - would be at risk of linking to the pc libraries, since cc-lfs is a native - compiler. So we have to build libstdc++ later, in chroot. + gcc does not know that it is usable on pc, and (2) using it on pc + would create a risk of linking to the pc libraries, since cc-lfs is a native + compiler. So we have to re-build libstdc++ later as a part of + gcc stage 2. + + In &ch-final; (or stage 3), all packages needed for + the LFS system are built. Even if a package is already installed into + the LFS system in a previous chapter, we still rebuild the package + unless we are completely sure it's unnecessary. 
The main reason for + rebuilding these packages is to settle them down: if we reinstall an LFS + package on a complete LFS system, the installed content of the package + should be the same as the content of the same package installed in + &ch-final;. The temporary packages installed in &ch-tmp-cross; or + &ch-tmp-chroot; cannot satisfy this expectation because some of them + are built without optional dependencies installed, and autoconf cannot + perform some feature checks in &ch-tmp-cross; because of cross + compilation, causing the temporary packages to lack optional features + or use suboptimal code routines. Additionally, a minor reason for + rebuilding the packages is to allow running the test suite. @@ -252,10 +285,10 @@ be part of the final system. Binutils is installed first because the configure - runs of both GCC and Glibc perform various feature tests on the assembler + runs of both gcc and glibc perform various feature tests on the assembler and linker to determine which software features to enable or disable. This - is more important than one might first realize. An incorrectly configured - GCC or Glibc can result in a subtly broken toolchain, where the impact of + is more important than one might realize at first. An incorrectly configured + gcc or glibc can result in a subtly broken toolchain, where the impact of such breakage might not show up until near the end of the build of an entire distribution. A test suite failure will usually highlight this error before too much additional work is performed. @@ -274,14 +307,14 @@ $LFS_TGT-gcc dummy.c -Wl,--verbose 2>&1 | grep succeeded will show all the files successfully opened during the linking. - The next package installed is GCC. An example of what can be + The next package installed is gcc. An example of what can be seen during its run of configure is: checking what assembler to use... /mnt/lfs/tools/i686-lfs-linux-gnu/bin/as checking what linker to use...
/mnt/lfs/tools/i686-lfs-linux-gnu/bin/ld This is important for the reasons mentioned above. It also - demonstrates that GCC's configure script does not search the PATH + demonstrates that gcc's configure script does not search the PATH directories to find which tools to use. However, during the actual operation of gcc itself, the same search paths are not necessarily used. To find out which standard linker gcc @@ -295,12 +328,12 @@ checking what linker to use... /mnt/lfs/tools/i686-lfs-linux-gnu/bin/ld Next installed are sanitized Linux API headers. These allow the - standard C library (Glibc) to interface with features that the Linux + standard C library (glibc) to interface with features that the Linux kernel will provide. - The next package installed is Glibc. The most important - considerations for building Glibc are the compiler, binary tools, and - kernel headers. The compiler is generally not an issue since Glibc will + The next package installed is glibc. The most important + considerations for building glibc are the compiler, binary tools, and + kernel headers. The compiler is generally not an issue since glibc will always use the compiler relating to the --host parameter passed to its configure script; e.g. in our case, the compiler will be $LFS_TGT-gcc. The binary tools and kernel @@ -313,30 +346,31 @@ checking what linker to use... /mnt/lfs/tools/i686-lfs-linux-gnu/bin/ld$LFS_TGT expanded) to control which binary tools are used and the use of the -nostdinc and -isystem flags to control the compiler's include - search path. These items highlight an important aspect of the Glibc + search path. These items highlight an important aspect of the glibc package—it is very self-sufficient in terms of its build machinery and generally does not rely on toolchain defaults. - As said above, the standard C++ library is compiled next, followed in - by all the programs that need - themselves to be built. 
The install step of all those packages uses the - DESTDIR variable to have the - programs land into the LFS filesystem. + As mentioned above, the standard C++ library is compiled next, followed in + by other programs that need + to be cross compiled for breaking circular dependencies at build time. + The install step of all those packages uses the + DESTDIR variable to force installation + in the LFS filesystem. At the end of the native - lfs compiler is installed. First binutils-pass2 is built, - with the same DESTDIR install as the other programs, - then the second pass of GCC is constructed, omitting libstdc++ - and other non-important libraries. Due to some weird logic in GCC's + LFS compiler is installed. First binutils-pass2 is built, + in the same DESTDIR directory as the other programs, + then the second pass of gcc is constructed, omitting some + non-critical libraries. Due to some weird logic in gcc's configure script, CC_FOR_TARGET ends up as - cc when the host is the same as the target, but is + cc when the host is the same as the target, but different from the build system. This is why - CC_FOR_TARGET=$LFS_TGT-gcc is put explicitly into - the configure options. + CC_FOR_TARGET=$LFS_TGT-gcc is declared explicitly + as one of the configuration options. Upon entering the chroot environment in , the first task is to install - libstdc++. Then temporary installations of programs needed for the proper + linkend="chapter-chroot-temporary-tools"/>, + the temporary installations of programs needed for the proper operation of the toolchain are performed. From this point onwards, the core toolchain is self-contained and self-hosted. In , final versions of all the