kdump+crash solves the sleep lock (mutex) deadlock problem

Table of Contents

1. Experiment purpose: Use crash to solve the sleep lock (mutex) deadlock problem

2. Experimental steps

3. Brief description of hung task mechanism

4. Analysis of mutex deadlock problem

4.1. Confirm deadlock thread

4.1.1. Use the ps | grep UN command to determine the D state thread in the crash, and use bt pid to view the D state process information.

4.1.2. The hung task mechanism determines deadlock threads

4.2. Find the corresponding sleep lock according to the deadlock thread

4.3. Analyze the content of the mutex lock and parse out the threads that acquire the lock and wait for the lock.

4.3.1. Analysis of key members of struct mutex

4.3.2. Parse the two key members of mutex

4.4. Use the bt command to view the stack traceback of the blocked thread.

Environment: arm64, Linux version 5.10.66

1. Experiment purpose: use crash to solve the sleep lock (mutex) deadlock problem

The experimental program is as follows. The program is compiled into a .ko and loaded onto the device with insmod. Entering the echo kdump-3 > /proc/dbug/kdump command on the serial port then runs our test program, which is a typical AB-BA deadlock. After about 2 minutes, the hung task detector notices that a thread has not been scheduled for 2 minutes and calls panic to trigger kdump. After kdump generates the vmcore file, use the crash command to analyze the sleep lock (mutex) deadlock problem.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>

#include <linux/device.h>
#include <linux/proc_fs.h>
#include <linux/version.h>

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
/*
If the write is successful, the number of bytes written must be returned, otherwise Linux will still try to write
 */

#define PROC_DIR_NAME "dbug" /* directory created under /proc */
#define PROC_FILE_NAME "kdump" /* debug file created under /proc/dbug */
#define KBUFSIZE 40 /* size of the kernel-side copy of the user command */
#define NAMELEN 20 /* max lock/thread name length, including NUL */

/* Per-thread test context: one mutex plus the kthread that exercises it. */
typedef struct kdump_mutex {
char cName[NAMELEN]; /* lock/thread name; also used as the kthread name */
int iDelay; /* ms to sleep while holding the first lock (widens the race) */
int iTime; /* times the lock has been acquired */
struct mutex lock; /* one of the two mutexes in the AB-BA deadlock */
struct task_struct *thread; /* kthread running mutex_thread1/mutex_thread2 */
void (*printk)(void *p); /* debug print hook; set to kdump_printk */
} KdumpMutex_st;

/* Contexts for the two deadlocking threads; allocated in proc_kdump_mutex() */
static KdumpMutex_st *gpstKdMutex1 = NULL;
static KdumpMutex_st *gpstKdMutex2 = NULL;

/*
 * Print the name and acquisition count of a test context.
 * Invoked through the ->printk function pointer from the test threads.
 * A NULL argument is tolerated and prints nothing.
 */
void kdump_printk(KdumpMutex_st *pstKdMutex)
{
	if (pstKdMutex != NULL)
		printk("Mutex lock name %s time = %d \n",
		       pstKdMutex->cName, pstKdMutex->iTime);
	return;
}

static int mutex_thread1(void* arg)
{
\t
KdumpMutex_st *pstKdMutex = NULL;
pstKdMutex = (KdumpMutex_st *)arg;
\t
/* Get mutex lock 1 */
mutex_lock( & amp;pstKdMutex->lock);
pstKdMutex->iTime + + ;
pstKdMutex->printk(pstKdMutex);
msleep(pstKdMutex->iDelay);

/* Get mutex lock 2 */
mutex_lock( & amp;gpstKdMutex2->lock);

mutex_unlock( & amp;pstKdMutex->lock);
mutex_unlock( & amp;gpstKdMutex2->lock);
return 0;
}

static int mutex_thread2(void* arg)
{
\t
KdumpMutex_st *pstKdMutex = NULL;
pstKdMutex = (KdumpMutex_st *)arg;
\t
/* Get mutex lock 2 */
mutex_lock( & amp;pstKdMutex->lock);
pstKdMutex->iTime + + ;
pstKdMutex->printk(pstKdMutex);
msleep(pstKdMutex->iDelay);

/* Get mutex lock 1 */
mutex_lock( & amp;gpstKdMutex1->lock);

mutex_unlock( & amp;pstKdMutex->lock);
mutex_unlock( & amp;gpstKdMutex1->lock);
return 0;
}

int proc_kdump_mutex(int delay)
{
/* Fill in the data of thread 1 and run the thread */
gpstKdMutex1 = kzalloc(sizeof(KdumpMutex_st), GFP_KERNEL);
strncpy(gpstKdMutex1->cName, "mutex thread_1", NAMELEN);
mutex_init( & amp;gpstKdMutex1->lock);
gpstKdMutex1->printk = (void (*)(void *p))kdump_printk;
gpstKdMutex1->iDelay = delay;
gpstKdMutex1->thread = kthread_run(mutex_thread1, gpstKdMutex1, gpstKdMutex1->cName);
\t
/* Fill in the data of thread 2 and run the thread */
gpstKdMutex2 = kzalloc(sizeof(KdumpMutex_st), GFP_KERNEL);
strncpy(gpstKdMutex2->cName, "mutex thread_2", NAMELEN);
mutex_init( & amp;gpstKdMutex2->lock);
gpstKdMutex2->printk = (void (*)(void *p))kdump_printk;
gpstKdMutex2->iDelay = delay;
gpstKdMutex2->thread = kthread_run(mutex_thread2, gpstKdMutex2, gpstKdMutex2->cName);
\t
return 0;
}


/* Debugging interface at /proc/dbug/kdump — plumbing only, not part of the demo logic */
char kbuf[KBUFSIZE] = {0}; //Holds the command string copied in from user space
struct proc_dir_entry *proc_wrbuff_dir; //Parent directory entry for /proc/dbug

/* proc file handlers, wired into fops_proc_wrbuffer below */
static int proc_wrbuff_open(struct inode *inode,struct file *file);
static ssize_t proc_wrbuff_read(struct file *file, char __user *ubuf, size_t count, loff_t *offset);
static ssize_t proc_wrbuff_write(struct file *file, const char __user *ubuf, size_t count, loff_t *offset);


/* Open handler for /proc/dbug/kdump: nothing to set up, just log the open. */
static int proc_wrbuff_open(struct inode *inode, struct file *file)
{
	printk("open embedsky board device!\n");
	return 0;
}

/*
 * Read back the last command written to /proc/dbug/kdump.
 * Honors *offset so readers such as `cat` see EOF instead of reading the
 * buffer forever (the original ignored the file position entirely).
 * Returns bytes copied, 0 at EOF, or -EFAULT on a failed user copy.
 */
static ssize_t proc_wrbuff_read(struct file *file, char __user *ubuf, size_t count, loff_t *offset)
{
	size_t len = strlen(kbuf);

	if (*offset >= (loff_t)len)
		return 0; /* EOF */
	if (count > len - *offset)
		count = len - *offset;
	if (copy_to_user(ubuf, kbuf + *offset, count)) {
		printk(KERN_ERR "copy_to_user failed! \n");
		return -EFAULT;
	}
	*offset += count;
	return count;
}

/*
 * Write handler for /proc/dbug/kdump.
 * Copies at most KBUFSIZE-1 bytes from user space, NUL-terminates, and if
 * the command matches "kdump-<N>" starts the mutex deadlock demo with an
 * N millisecond delay. Returns bytes consumed or -EFAULT.
 */
static ssize_t proc_wrbuff_write(struct file *file, const char __user *ubuf, size_t count, loff_t *offset)
{
	int num = 0;
	size_t cnt = min_t(size_t, count, KBUFSIZE - 1);

	if (copy_from_user(kbuf, ubuf, cnt)) {
		printk(KERN_ERR "copy_from_user failed! \n");
		return -EFAULT;
	}
	kbuf[cnt] = '\0';
	printk("printk kbuf %s \n", kbuf);

	/* "kdump-<N>": launch the two deadlocking kthreads */
	if (sscanf(kbuf, "kdump-%d", &num))
		proc_kdump_mutex(num);

	return cnt;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)
/* Kernels >= v5.6 register proc entries through struct proc_ops */
static struct proc_ops fops_proc_wrbuffer = {
	.proc_open = proc_wrbuff_open,
	.proc_read = proc_wrbuff_read,
	.proc_write = proc_wrbuff_write,
};
#else
/* Older kernels still take a struct file_operations */
static struct file_operations fops_proc_wrbuffer = {
	.owner = THIS_MODULE, /* duplicate .owner initializer removed */
	.open = proc_wrbuff_open,
	.read = proc_wrbuff_read,
	.write = proc_wrbuff_write,
};
#endif


/*
 * Module init: create /proc/dbug/kdump.
 * On failure to create the file the PARENT DIRECTORY is removed (the
 * original erroneously removed the file entry that had just failed to be
 * created, leaking the directory).
 * Returns 0 on success, a negative value on failure.
 */
static int __init proc_wrbuff_init(void)
{
	struct proc_dir_entry *proc_file;

	/* 1. create parent dir /proc/dbug */
	proc_wrbuff_dir = proc_mkdir(PROC_DIR_NAME, NULL);
	if (!proc_wrbuff_dir) {
		printk("create proc dir error! \n");
		return -1;
	}

	/* 2. create the debug file under /proc/dbug */
	proc_file = proc_create_data(PROC_FILE_NAME, 0666, proc_wrbuff_dir,
				     &fops_proc_wrbuffer, 0);
	if (!proc_file) {
		printk("create proc file error! \n");
		/* the file was never created — tear down the directory */
		remove_proc_entry(PROC_DIR_NAME, NULL);
		return -2;
	}

	return 0;
}

/* Module teardown: remove /proc/dbug/kdump, then /proc/dbug itself.
 * NOTE(review): the test kthreads and the gpstKdMutex1/2 allocations are
 * not cleaned up here — acceptable only because this is a crash demo. */
static void __exit proc_wrbuff_exit(void) {

remove_proc_entry(PROC_FILE_NAME,proc_wrbuff_dir);
remove_proc_entry(PROC_DIR_NAME, NULL);
}
/* late_initcall: register after core subsystems (procfs) are available */
late_initcall(proc_wrbuff_init);
module_exit(proc_wrbuff_exit);

MODULE_DESCRIPTION("debug");
MODULE_LICENSE("GPL");

2. Experimental steps

Prerequisite: The device Linux supports the kdump function. For support methods, please refer to this article.

a. Compile the test module: Compile the above source code into the kdump.ko driver module, and use the insmod command in the device serial port to load this driver into the device. At this time, the /proc/dbug/kdump debug file will be generated

b. Trigger panic: Execute the echo kdump-4 > /proc/dbug/kdump command to execute the test program. The test program will execute panic to trigger the kdump mechanism, start the capture kernel, and generate the /proc/vmcore file in the capture kernel.

c. Save the vmcore file: execute cd /proc;tar -czf /tmp/3588/vmcore.tar.gz ./vmcore to compress the vmcore file under the captured kernel and copy it to a USB disk or NFS mounted directory.

d. Use crash to analyze the vmcore file: Execute the crash vmlinux vmcore command to use crash to analyze the vmcore file.

e. Since the vmcore file will only retain the code part of kdump.ko, you need to use mod to load the debugging and symbol information of the kdump.ko driver module in crash. Executing the crash> dis -l kdump_proc_write command in this way will correctly display the line number information corresponding to the assembly code.

mod -s kdump /kdump/demo/stack/kdump.ko

3. Brief description of hung task mechanism

a. core idea of hung task

To create a kernel monitoring process (khungtaskd), it cyclically monitors each process (task) in the D state, and counts the number of times they are scheduled between two detections. If it is found that there is a task that has not been scheduled between two monitoring times, It can be judged that the process has been in the D state and is likely to be deadlocked. Therefore, panic is called to output the basic information of the process, stack traceback, and register storage information for kernel developers to locate. The loop period is configured through CONFIG_DEFAULT_HUNG_TASK_TIMEOUT, in seconds.

b. Possible reasons for triggering the hung task mechanism

The synchronization mechanisms such as completion, mutex, and wait event used by related tasks or kernel threads wait for conditions to be met. During the waiting process, they are not awakened after the time set by CONFIG_DEFAULT_HUNG_TASK_TIMEOUT.

C, hung task mechanism proc interface

/proc/sys/kernel/hung_task_all_cpu_backtrace
/* The maximum number of hung task detection processes, including non-D state processes */
/proc/sys/kernel/hung_task_check_count
/proc/sys/kernel/hung_task_check_interval_secs
/* Whether hung task causes kernel panic */
/proc/sys/kernel/hung_task_panic
/* The time threshold for the D state process to trigger the hung task mechanism is generally 120 seconds. If set to 0, the khungtaskd process will no longer be scheduled and the hung task function will be turned off */
/proc/sys/kernel/hung_task_timeout_secs
/* The maximum number of hung task alarm messages */
/proc/sys/kernel/hung_task_warnings

D state: TASK_UNINTERRUPTIBLE, a waiting state of the thread. In this state, the process does not receive signals and can only be awakened through wake_up.

4. Analysis of mutex deadlock problem

There are three key points to solve the sleep deadlock problem:

a. Find the deadlocked thread

b. Obtain the lock according to the deadlock thread

c. Analyze the threads that acquire locks and wait for locks

d. View the stack information of the deadlock thread

4.1. Confirm deadlock thread

For mutex locks, when a thread cannot acquire the lock, it enters the D state and sleeps. When a deadlock problem occurs, the relevant threads will also sleep and enter the D state. Mutex deadlock can be detected through the hung task thread, and the hung task triggers panic, so the stack printed by bt is hung task information and has nothing to do with the deadlock thread. Here are two methods to find deadlock threads.

crash> bt
PID: 59 TASK: ffffff81010b0000 CPU: 3 COMMAND: "khungtaskd"
 #0 [ffffffc0127bbb20] machine_kexec at ffffffc01001f63c
 #1 [ffffffc0127bbb70] __crash_kexec at ffffffc0100d91bc
 #2 [ffffffc0127bbd00] panic at ffffffc010e847f4
 #3 [ffffffc0127bbde0] watchdog at ffffffc0100ef918
 #4 [ffffffc0127bbe60] kthread at ffffffc010058270
4.1.1. Use the ps | grep UN command to determine the D state in the crash Status thread, bt pid View D status process information

The a, ps | grep UN command will print out all threads in the D state, which must include the two deadlocked threads.

crash> ps | grep UN
      373 2 4 ffffff8102012400 UN 0.0 0 0 [OSA_372_1]
     5544 2 4 ffffff8106bf6c00 UN 0.0 0 0 [mutex thread_1]
     5545 2 5 ffffff8106bf3600 UN 0.0 0 0 [mutex thread_2]
crash>

b, bt pid command View the stack traceback print of each D state thread, determine the reason why the thread entered the D state, and find the thread that entered the D state due to deadlock.

crash> bt 373
PID: 373 TASK: ffffff8102012400 CPU: 4 COMMAND: "OSA_372_1"
 #0 [ffffffc012d93c20] __switch_to at ffffffc010007e88
 #1 [ffffffc012d93c50] __schedule at ffffffc010eafa54
 #2 [ffffffc012d93cd0] schedule at ffffffc010eafd20
 #3 [ffffffc012d93cf0] schedule_timeout at ffffffc010eb23bc
 #4 [ffffffc012d93d60] schedule_timeout_uninterruptible at ffffffc010eb2470
 #5 [ffffffc012d93d70] msleep at ffffffc0100b8890
 #6 [ffffffc012d93d80] OSA_msleep at ffffffc008ee9e58 [osa]
 #7 [ffffffc012d93d90] OSA_tskRun at ffffffc008eedc28 [osa]
 #8 [ffffffc012d93e10] OSA_thrRunBody at ffffffc008ee99cc [osa]
 #9 [ffffffc012d93e60] kthread at ffffffc010058270

Check the stack traceback of the 373 [OSA_372_1] process and find that this thread calls msleep to enter the D state, which has nothing to do with locks.

crash> bt 5544
PID: 5544 TASK: ffffff8106bf6c00 CPU: 4 COMMAND: "mutex thread_1"
 #0 [ffffffc01334bcc0] __switch_to at ffffffc010007e88
 #1 [ffffffc01334bcf0] __schedule at ffffffc010eafa54
 #2 [ffffffc01334bd70] schedule at ffffffc010eafd20
 #3 [ffffffc01334bd90] schedule_preempt_disabled at ffffffc010eaff90
 #4 [ffffffc01334bda0] __mutex_lock.constprop.0 at ffffffc010eb0e08
 #5 [ffffffc01334be10] __mutex_lock_slowpath at ffffffc010eb0fe0
 #6 [ffffffc01334be20] mutex_lock at ffffffc010eb1024
 #7 [ffffffc01334be40] mutex_thread1 at ffffffc008fb8100 [kdump]
 #8 [ffffffc01334be60] kthread at ffffffc010058270
crash>

Check the thread stack traceback of 5544 [mutex thread_1] and find that this function enters the D state because it cannot acquire the lock. It can basically be confirmed that it is a deadlock thread. Similarly, check the 5545 [mutex thread_2] thread for similar printing.

Summary: A sleep lock (mutex, rw_semaphore) deadlock causes threads to enter the dormant D state. Use the ps | grep UN command to list all D state threads; then use the bt pid command to inspect why each D state thread is sleeping and find the threads that went to sleep because they could not acquire a lock.

4.1.2, hung task mechanism determines deadlock threads

The hung task mechanism will print out the thread information of threads in D state that have not been scheduled for more than 2 minutes. The D state threads printed by hung task are most likely deadlock threads. You can use the bt pid command to view the stack traceback to determine.

~ # echo kdump-3000 > /proc/dbug/kdump
[598.895301] Mutex lock name mutex thread_1 time = 1
[598.895336] Mutex lock name mutex thread_2 time = 1

~ # [ 773.271035] INFO: task mutex thread_1:5544 blocked for more th[ 773.271052] task:mutex thread_1 state:D stack: 0 pid: 5544 ppid: 2 flags:0x00000008
[773.271057] Call trace:
[773.271064] __switch_to + 0x134/0x1ac
[773.271069] __schedule + 0x3bc/0x590
[773.271072] schedule + 0x80/0xc8
[773.271076] schedule_preempt_disabled + 0x1c/0x28
[773.271080] __mutex_lock.constprop.0 + 0x168/0x234
[773.271083] __mutex_lock_slowpath + 0x1c/0x28
[773.271087] mutex_lock + 0x38/0x48
[773.271093] mutex_thread1 + 0x60/0x88 [kdump]
[773.271096] kthread + 0xf8/0x108
[773.271100] ret_from_fork + 0x10/0x18

[773.271103] INFO: task mutex thread_2:5545 blocked for more than 122 seconds.
[773.271105] Tainted: P O 5.10.66 #1
[ 773.271108] “echo 0 > /proc/sys/kernel/hung_task_timeout_secs” disables this message.
[773.271110] task:mutex thread_2 state:D stack: 0 pid: 5545 ppid: 2 flags:0x00000008
[ 773.271049] “[ 773.436617] dump_stack + 0x18/0x34
[773.439923] panic + 0x170/0x35c
echo 0 > /proc/sys/kernel/hung_task_timeou[ 773.446618] watchdog + 0x154/0x35c
[773.449923] kthread + 0xf8/0x108
t_secs” disables this message.
[773.455831] ret_from_fork + 0x10/0x18
[773.459407] SMP: stopping secondary CPUs
[773.463530] Starting crashdump kernel…
[773.467451] Bye!

4.2. Find the corresponding sleep lock according to the deadlock thread

According to the stack traceback printing of the deadlock thread, it is easy to find in the program the specific lock used in the thread that caused the deadlock.

crash> bt 5544
PID: 5544 TASK: ffffff8106bf6c00 CPU: 4 COMMAND: "mutex thread_1"
 #0 [ffffffc01334bcc0] __switch_to at ffffffc010007e88
 #1 [ffffffc01334bcf0] __schedule at ffffffc010eafa54
 #2 [ffffffc01334bd70] schedule at ffffffc010eafd20
 #3 [ffffffc01334bd90] schedule_preempt_disabled at ffffffc010eaff90
 #4 [ffffffc01334bda0] __mutex_lock.constprop.0 at ffffffc010eb0e08
 #5 [ffffffc01334be10] __mutex_lock_slowpath at ffffffc010eb0fe0
 #6 [ffffffc01334be20] mutex_lock at ffffffc010eb1024
 #7 [ffffffc01334be40] mutex_thread1 at ffffffc008fb8100 [kdump]
 #8 [ffffffc01334be60] kthread at ffffffc010058270
crash>

In this example, it is easy to know that there are two mutex locks in the mutex_thread1 thread:

gpstKdMutex1->lock and gpstKdMutex2->lock;

static int mutex_thread1(void* arg)
{
KdumpMutex_st *pstKdMutex = NULL;
pstKdMutex = (KdumpMutex_st *)arg;
/* Get mutex lock 1 */
mutex_lock(&pstKdMutex->lock);
    ............................
/* Get mutex lock 2 */
mutex_lock(&gpstKdMutex2->lock);

mutex_unlock(&pstKdMutex->lock);
mutex_unlock(&gpstKdMutex2->lock);
return 0;
}

Get the specific content of the lock

crash> rd gpstKdMutex1
ffffffc008fba480: ffffff810791b080…..

crash> struct KdumpMutex_st.lock ffffff810791b080 -x
lock = {
owner = {
counter = 0xffffff8106bf6c01
},
wait_list = {
next = 0xffffffc013353de8,
prev = 0xffffffc013353de8
},
}

4.3. Analyze the content of the mutex lock and parse out the threads that acquire the lock and wait for the lock

4.3.1, struct mutex key member analysis
struct mutex {
    atomic_long_t owner;
    struct list_head wait_list;
};

a, atomic_long_t owner member

The owner member is divided into 2 parts: for the arm64 architecture, owner is a 64-bit long value. Bits [6:63] hold the address of the thread that acquired this mutex, i.e. the address of its struct task_struct; the low bits [0:5] — of which only the lowest 3 are actually used — serve as the mutex flag bits.

owner member bit[0 : 5]Meaning: The lower 6 bits of the arm64 architecture struct task_struct structure address are zero, and the mutex lock will only use the lower 3 bits. The specific meaning is defined as follows :

/* @owner: contains: ‘struct task_struct *’ to the current lock owner,
* NULL means not owned.
* Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
* Bit1 indicates unlock needs to hand the lock to the top-waiter
* Bit2 indicates handoff has been done and we’re waiting for pickup */
#define MUTEX_FLAG_WAITERS 0x01
#define MUTEX_FLAG_HANDOFF 0x02
#define MUTEX_FLAG_PICKUP 0x04
#define MUTEX_FLAGS 0x07

owner member bit[6: 63] Meaning: Save the thread pointer to obtain this mutex, that is, the address of the struct task_struct structure

For the arm64 architecture, the struct task_struct structure address is aligned according to L1_CACHE_BYTES bytes. According to the following definition, it is 64-byte alignment, which means that the struct task_struct address is converted into binary and the last 6 bits are all 0;

\linux\src\arch\arm64\include\asm\cache.h
#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

This part of the assignment is implemented through the following function. According to the situation, curr = (unsigned long)current; is assigned to the owner member bit[6 : 63]

static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
unsigned long owner, curr = (unsigned long)current;
if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags))
}

b. struct list_head wait_list member

struct list_head wait_list is the head of the linked list, mutex is a sleep lock. When the thread cannot obtain the lock and does not have optimistic spin conditions, it will be placed in this waiting queue and wait for the owner to release the lock.

struct mutex_waiter is a linked list node. A thread that cannot obtain the lock will define a struct mutex_waiter structure and insert the tail of this structure into the linked list. The specific definition is as follows:

struct mutex_waiter {
struct list_head list;
struct task_struct *task; /* Block the thread struct task_struct address, use the current variable */
struct ww_acquire_ctx *ww_ctx;
};

Insertion operation in linked list

__mutex_lock
__mutex_lock_common
struct mutex_waiter waiter;
__mutex_add_waiter(lock, &waiter, &lock->wait_list);

4.3.2, parsing two key members of mutex
crash> rd gpstKdMutex1
ffffffc008fba480: ffffff810791b080.....
crash> struct KdumpMutex_st.lock ffffff810791b080 -x
  lock = {
    owner = {
      counter = 0xffffff8106bf6c01
    },
    wait_list = {
      next = 0xffffffc013353de8,
      prev = 0xffffffc013353de8
    },
  }

a. Use the atomic_long_t owner member to parse out which thread currently holds the lock
owner = counter = 0xffffff8106bf6c01. According to the analysis in Chapter 4.3.1, clearing the lower 6 bits of this value yields the address of the struct task_struct of the thread holding the lock.

crash> struct task_struct.comm,pid 0xffffff8106bf6c00
comm = “mutex thread_1\000”,
pid = 5544,
crash>

b. Use the struct list_head wait_list member to parse which threads are waiting for the lock

The contents of the mutex linked list header are as follows. The next and prev pointers are the same, indicating that only one node is in the doubly linked list.

wait_list = {
next = 0xffffffc013353de8,
prev = 0xffffffc013353de8
},

Use the list mutex_waiter.list -s mutex_waiter.task -h 0xffffffc013353de8 command to traverse all nodes in the linked list and print out the mutex_waiter.task member of each node. That is, the thread pointer blocked in the linked list cannot be acquired. Only one thread below cannot obtain the lock and is blocked. The address of the thread struct task_struct structure is at 0xffffff8106bf3600.

crash> list mutex_waiter.list -s mutex_waiter.task -h 0xffffffc013353de8
ffffffc013353de8
task = 0xffffff8106bf3600,
ffffff810791b0c8
task = 0x0,

According to the blocked thread struct task_struct structure address at 0xffffff8106bf3600, use the struct command to view the name and pid of this thread;

crash> struct task_struct.comm,pid 0xffffff8106bf3600
comm = “mutex thread_2\000”,
pid = 5545,
crash>

4.4. Use the bt command to view the stack traceback of the blocked thread

Based on the thread currently holding the lock and the thread blocked due to failure to obtain the lock, analyze the cause of the deadlock and then specifically solve the deadlock problem.

The knowledge points of the article match the official knowledge files, and you can further learn relevant knowledge. CS entry skill treeLinux introductionFirst introduction to Linux 37281 people are learning the system