generated from USC-EE-250L-Spring-2023/lab-10
-
Notifications
You must be signed in to change notification settings - Fork 0
161 lines (133 loc) · 6.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
import math
import threading
import time
from typing import List, Optional

import numpy as np
import pandas as pd
import plotly.express as px
import requests
def generate_data() -> List[int]:
    """Return 1000 random integers drawn uniformly from [100, 10000)."""
    samples = np.random.randint(low=100, high=10000, size=1000)
    # .tolist() converts numpy scalars to plain Python ints (JSON-friendly).
    return samples.tolist()
def process1(data: List[int]) -> List[int]:
    """Replace every value with the smallest prime strictly greater than it.

    Args:
        data: Integers to transform (the lab feeds values in [100, 10000)).

    Returns:
        List[int]: element i is the next prime after data[i].
    """
    def next_prime(x: int) -> int:
        """Return the smallest prime strictly greater than x."""
        while True:
            x += 1
            if x < 2:
                # 0 and 1 are not prime; the original's empty trial-division
                # range for x < 2 wrongly accepted them.
                continue
            # Trial division only needs to reach sqrt(x): every composite has
            # a divisor no larger than its square root. The original scanned
            # all of range(2, x), which is O(x) per candidate.
            if all(x % i for i in range(2, math.isqrt(x) + 1)):
                return x

    return [next_prime(x) for x in data]
def process2(data: List[int]) -> List[int]:
    """Replace every value with the smallest perfect square strictly greater than it.

    NOTE(review): the original inner docstring said "next largest prime", but
    the check ``int(sqrt(x)) ** 2 == x`` accepts perfect squares, not primes.

    Args:
        data: Non-negative integers to transform.

    Returns:
        List[int]: element i is the next perfect square after data[i].
    """
    def next_square(x: int) -> int:
        """Return the smallest perfect square strictly greater than x."""
        # Closed form: the next square after x is (isqrt(x) + 1)^2. This
        # replaces the original's O(sqrt(x)) linear scan, and math.isqrt is
        # exact for arbitrary ints, unlike int(np.sqrt(x)) which can
        # misround for very large values.
        root = math.isqrt(x) + 1
        return root * root

    return [next_square(x) for x in data]
def final_process(data1: List[int], data2: List[int]) -> float:
    """Return the mean element-wise difference ``data1[i] - data2[i]``.

    The original annotation claimed ``List[int]``, but ``np.mean`` returns a
    scalar (and ``run()``'s docstring already documents a float result), so
    the return type is corrected here.

    Args:
        data1: First sequence of integers.
        data2: Second sequence of integers; zip truncates to the shorter one.

    Returns:
        float: mean of the pairwise differences, or NaN for empty input.
    """
    if not data1 or not data2:
        # np.mean([]) would return NaN anyway but also emits a
        # RuntimeWarning; make the empty case explicit and quiet.
        return float("nan")
    return float(np.mean([a - b for a, b in zip(data1, data2)]))
offload_url = 'http://192.168.4.74:5000' # TODO: Change this to the IP address of your server
def run(offload: Optional[str] = None) -> float:
    """Run the program, offloading the specified function(s) to the server.

    Args:
        offload: Which function(s) to offload to the server. Can be None
            (run everything locally), 'process1', 'process2', or 'both'.

    Returns:
        float: the final result of the program.

    Raises:
        ValueError: if ``offload`` is not one of the recognized modes.
    """
    def post_to_server(endpoint: str, payload: List[int]) -> List[int]:
        """POST payload to <offload_url>/<endpoint> and return the JSON body."""
        # NOTE(review): no timeout is set, matching the original code; a hung
        # server blocks forever. Consider requests.post(..., timeout=30).
        response = requests.post(f"{offload_url}/{endpoint}", json={"data": payload})
        return response.json()

    data = generate_data()

    if offload is None:
        # Run both processing functions locally, one after the other.
        data1 = process1(data)
        data2 = process2(data)
    elif offload == 'process1':
        data1 = None

        def offload_process1(payload):
            nonlocal data1  # write the server's answer back into run()'s scope
            data1 = post_to_server("process1", payload)

        thread = threading.Thread(target=offload_process1, args=(data,))
        thread.start()
        data2 = process2(data)  # local work overlaps the network round-trip
        thread.join()  # wait until data1 has been written before combining
        # Question 2: Why do we need to join the thread here?
        # Question 3: Are the processing functions executing in parallel or just concurrently? What is the difference?
        # See this article: https://oxylabs.io/blog/concurrency-vs-parallelism
        # ChatGPT is also good at explaining the difference between parallel and concurrent execution!
        # Make sure to cite any sources you use to answer this question.
    elif offload == 'process2':
        # Mirror of the 'process1' case: process2 goes to the server while
        # process1 runs locally.
        data2 = None

        def offload_process2(payload):
            nonlocal data2
            data2 = post_to_server("process2", payload)

        thread = threading.Thread(target=offload_process2, args=(data,))
        thread.start()
        data1 = process1(data)
        thread.join()
    elif offload == 'both':
        # Offload each function on its own thread so the two server calls
        # overlap. The original issued both POSTs sequentially on a single
        # thread, which gained nothing over plain blocking calls.
        data1 = None
        data2 = None

        def offload_process1(payload):
            nonlocal data1
            data1 = post_to_server("process1", payload)

        def offload_process2(payload):
            nonlocal data2
            data2 = post_to_server("process2", payload)

        threads = [
            threading.Thread(target=offload_process1, args=(data,)),
            threading.Thread(target=offload_process2, args=(data,)),
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
    else:
        # The original fell through with data1/data2 unbound (NameError);
        # fail with a clear message instead.
        raise ValueError(f"Unknown offload mode: {offload!r}")

    return final_process(data1, data2)
def main():
    """Time run() 5x under each offloading mode, plot makespans, report best/worst."""
    modes = [None, 'process1', 'process2', 'both']
    rows = []
    for mode in modes:
        execution_times = []
        for i in range(5):
            start_time = time.time()
            run(mode)
            execution_time = time.time() - start_time
            execution_times.append(execution_time)
            print(f"Mode {mode}, run {i}: {execution_time:.2f}s")
        # One row per mode with aggregate statistics. The original stored the
        # raw list of times in a single cell, which made error_y on the same
        # column meaningless and made idxmin()/idxmax() raise TypeError.
        rows.append({
            'Mode': str(mode),  # str() so None renders as a category label
            'Execution Time (s)': float(np.mean(execution_times)),
            'Std (s)': float(np.std(execution_times, ddof=1)),
        })
    df = pd.DataFrame(rows)
    fig = px.bar(df, x='Mode', y='Execution Time (s)', error_y='Std (s)').update_layout(
        title='Execution Time for Different Offloading Modes',
        xaxis_title='Offloading Mode',
        yaxis_title='Execution Time (s)',
    )
    fig.write_image('makespan.png')
    print(df.describe())
    # idxmin/idxmax return row labels, so use .loc (the original's .iloc only
    # worked because the default index happens to be positional).
    print(f"Best offloading mode: {df.loc[df['Execution Time (s)'].idxmin(), 'Mode']}")
    print(f"Worst offloading mode: {df.loc[df['Execution Time (s)'].idxmax(), 'Mode']}")
    # Question 4: What is the best offloading mode? Why do you think that is?
    # Question 5: What is the worst offloading mode? Why do you think that is?
    # Question 6: The processing functions in the example aren't very likely to be used in a real-world application.
    # What kind of processing functions would be more likely to be used in a real-world application?
    # When would you want to offload these functions to a server?
# Script entry point: benchmark every offloading mode when executed directly.
if __name__ == '__main__':
    main()